# ---- Setup ----------------------------------------------------------------
library(tidyverse)
library(psych)
library(rstan)    # to run the Bayesian model (stan)
library(coda)     # to obtain HPD intervals (HPDinterval)
library(mvtnorm)  # to generate random correlated data (rmvnorm)
library(car)      # to plot the inferred distribution (dataEllipse)
library(brms)
library(bayesplot)
library(gridExtra)

# Run MCMC chains in parallel on all available cores.
options(mc.cores = parallel::detectCores())
# Drop into the interactive debugger when an error occurs.
options(error = recover)

library(foreign) # to read SPSS file

# Read the SPSS data file; here::here() resolves the path from the
# project root regardless of the current working directory.
data <- here::here('1. Data/1. WA project (SPSS)-2019-6-11.sav') %>%
  read.spss(to.data.frame = TRUE)

# Drop the second column (not used in the analyses below).
data <- data[, -2]

# Defines rob.cor.mcmc(), the robust Bayesian correlation helper used below.
source("2_Rcodes_2020-09-18/rob.cor.mcmc_additional_criteria.R")

# Log-transform the raw-frequency measures to reduce skew.
data$Rawfreq   <- log(data$Rawfreq)
data$Raw_first <- log(data$Raw_first)
data$RawFirst  <- log(data$RawFirst)

# NOTE(review): attach() is generally discouraged, but the correlation
# analyses below refer to columns (Rawfreq, tscore, MI, Fluency, ...)
# directly, so it is kept for backward compatibility.
attach(data)
head(data)
##    ID Nativelikeness     Comp Lex30raw  Lex30 Numberofresp  Rawfreq   tscore
## 1 111       7.538462 6.384615       41 43.158          104 6.282870 6.970751
## 2 112       6.230769 5.076923       35 40.697          108 5.984021 8.793760
## 3 113       6.384615 6.769231       33 36.667          109 6.569201 8.210262
## 4 114       7.230769 6.923077       30 30.928          108 6.047171 8.213042
## 5 115       7.307692 6.384615       22 30.986           95 6.382420 7.441458
## 6 116       6.230769 7.307692       34 44.156          101 6.265812 8.155772
##         MI      MI2  LogDice Raw_first tscore_first MI_first MI2_first
## 1 1.910758 8.469356 2.860707  6.081403     7.618452 2.485408  9.393354
## 2 1.985217 8.518691 2.810504  6.347457     9.508932 2.263757  8.956739
## 3 1.616796 8.709225 3.257821  6.962454     9.602330 1.688280  8.868220
## 4 1.608021 8.559300 3.163457  5.791183     9.877567 2.006507  9.066217
## 5 1.256642 8.087881 3.031709  7.093374    10.084976 1.164878  8.145006
## 6 1.555219 8.103584 2.897684  6.393533     6.363937 1.373691  7.789863
##   LogDice_first t_cutoff_2 MI_cutoff_0 MI_cutoff_2 MI_cutoff_3 LD_cutoff_2
## 1      3.328366  0.6250000   0.7211538   0.3557692   0.2884615   0.6346154
## 2      3.048217  0.6203704   0.7129630   0.3518519   0.2222222   0.5462963
## 3      3.415265  0.6238532   0.7155963   0.2844037   0.2110092   0.6055046
## 4      3.413932  0.5555556   0.7314815   0.3333333   0.1944444   0.6388889
## 5      3.105427  0.5473684   0.6315789   0.2842105   0.2000000   0.6315789
## 6      2.753349  0.5940594   0.7029703   0.3465347   0.2475248   0.6237624
##    tscore.2 tscore.10      MI.3        MI.7 Logdicegt7 DiversityMTLD
## 1 0.6250000 0.2500000 0.2884615 0.009615385 0.01923077        43.653
## 2 0.6203704 0.2777778 0.2222222 0.009259259 0.01851852        32.680
## 3 0.6238532 0.2385321 0.2110092 0.000000000 0.03669725        27.980
## 4 0.5555556 0.2962963 0.1944444 0.000000000 0.02777778        19.200
## 5 0.5473684 0.2631579 0.2000000 0.000000000 0.01052632        25.351
## 6 0.5940594 0.2376238 0.2475248 0.000000000 0.02970297        26.901
##   FrequencyJACETInfrequentwordratio WordCount COCA_spoken_Range_Log_AW
## 1                        0.10638298        74               -0.4132691
## 2                        0.08333333        82               -0.3847393
## 3                        0.09090909       141               -0.3543852
## 4                        0.10810811        93               -0.3621051
## 5                        0.00000000        94               -0.3030986
## 6                        0.02439024        74               -0.2600043
##   COCA_spoken_Frequency_Log_AW COCA_spoken_Bigram_Frequency_Log
## 1                     3.075455                         1.423177
## 2                     3.157143                         1.452227
## 3                     3.242869                         1.573254
## 4                     3.306088                         1.635040
## 5                     3.236288                         1.354882
## 6                     3.357921                         1.484601
##   COCA_spoken_Bigram_Range_Log COCA_spoken_Trigram_Frequency_Log
## 1                    -1.339891                         0.6517929
## 2                    -1.340273                         0.4822114
## 3                    -1.236915                         0.5668987
## 4                    -1.202289                         0.4332461
## 5                    -1.397088                         0.2487292
## 6                    -1.292880                         0.4127399
##   COCA_spoken_Trigram_Range_Log IndexCOCA_spoken_Frequency_Log_AW      FAC1_1
## 1                     -2.015825                          1.607021 -0.29832644
## 2                     -2.175634                          1.523029  0.01254624
## 3                     -2.082042                          1.910392  1.02337270
## 4                     -2.225078                          1.823994  1.67592462
## 5                     -2.388899                          1.903294 -0.54963879
## 6                     -2.227538                          1.882117  0.44157937
##       FAC2_1     FAC3_1 RawFirst RawSecond  RawThird RawFourth   T_First
## 1  0.5145524 -0.8557059 6.081403  146.6250 1091.6522  494.6667  7.618452
## 2 -0.8784494 -0.1260386 6.347457  288.0435  431.6087  231.5882  9.508932
## 3 -0.1905995  0.7445929 6.962454  645.1667  534.5455  543.3636  9.602330
## 4 -1.4208401  0.9929462 5.791183  455.8148  428.5455  491.7000  9.877567
## 5 -2.6610871  1.1167707 7.093374  250.5600  381.9474  280.4615 10.084976
## 6 -1.3281218  2.3686025 6.393533  686.4643  389.6087  263.0769  6.363937
##   T_Second   T_Third  T_Fourth MI_First1 MI_Second MI_Third MI_Fourth Richness
## 1 4.678420  5.889352 10.401449  2.485408  1.660840 1.361889  2.051415   0.3712
## 2 6.756471 11.396671  6.934716  2.263757  1.409582 2.347246  1.848211   0.1938
## 3 8.612824  6.567567  7.705351  1.688280  1.302220 1.531392  1.957641   0.3886
## 4 8.107667  5.619153  9.127916  2.006507  1.428980 1.205301  1.794612   0.2838
## 5 6.199212  6.685346  5.445098  1.164878  1.271848 1.507015  1.052054   0.2210
## 6 9.275682  9.772509  6.880444  1.373691  1.583914 1.878349  1.326667   0.1952
##     MTLD Fluency ArticulationRate FilledPause SilentPause invertedRawFreq
## 1 43.653  0.4408         85.23792  0.21621622   0.8783784       -535.3226
## 2 32.680  0.3126         62.73555  0.06097561   0.7439024       -397.0337
## 3 27.980  0.4506        138.67348  0.01418440   0.5390071       -712.8000
## 4 19.200  0.2978         93.89826  0.02150538   0.6451613       -422.9149
## 5 25.351  0.3328         73.65811  0.19148936   0.8936170       -591.3571
## 6 26.901  0.2382         73.79596  0.02702703   0.9189189       -526.2688
##   invertedFirstRawFreq
## 1            -437.6429
## 2            -571.0385
## 3           -1056.2222
## 4            -327.4000
## 5           -1203.9630
## 6            -597.9655
# MCMC iteration settings used by the model fits below.
iter1 <- 5000  # standard number of iterations per chain
iter2 <- 9000  # alternative, longer iteration count

#' Plot an information criterion (point +/- 1 SE) for a set of models.
#'
#' @param obj_compare_loo A matrix returned by loo_compare(); columns 7:8
#'   are taken as the IC estimate and its standard error (e.g. looic /
#'   se_looic or waic / se_waic) -- TODO confirm column positions for the
#'   installed loo version.
#' @param metric Axis label naming the criterion being plotted.
#' @return A ggplot object: one point-range per model, flipped coordinates.
IC_plot <- function(obj_compare_loo, metric) {
  # Extract the IC estimate and SE columns; generic names so any
  # information criterion can be plotted with the same code.
  ic_df <- as.data.frame(obj_compare_loo[, 7:8])
  names(ic_df) <- c("ic", "se")
  ic_df %>%
    rownames_to_column(var = "model_name") %>%
    ggplot(aes(x    = model_name,
               y    = ic,
               ymin = ic - se,
               ymax = ic + se)) +
    geom_pointrange(shape = 21) +
    coord_flip() +
    labs(x = NULL, y = metric) +
    theme_classic() +
    theme(text             = element_text(family = "Courier"),
          axis.ticks.y     = element_blank(),
          panel.background = element_rect())
}

#' Compare three fitted brms models by LOO and WAIC.
#'
#' @param Null_Model,Model_1,Model_2 Fitted brmsfit objects.
#' @return A list of four elements (order unchanged; names added for
#'   readability -- positional access by existing callers still works):
#'   the LOO comparison table, the WAIC comparison table, the looic plot,
#'   and the waic plot.
model_comp <- function(Null_Model, Model_1, Model_2) {
  # Attach LOO and WAIC criteria to each (local copy of the) fit.
  Null_Model <- add_criterion(Null_Model, criterion = c("loo", "waic"))
  Model_1    <- add_criterion(Model_1,    criterion = c("loo", "waic"))
  Model_2    <- add_criterion(Model_2,    criterion = c("loo", "waic"))

  loos  <- loo_compare(Null_Model, Model_1, Model_2, criterion = "loo")
  waics <- loo_compare(Null_Model, Model_1, Model_2, criterion = "waic")

  loo_plot  <- IC_plot(loos, "looic")
  waic_plot <- IC_plot(waics, "waic")

  return(list(loos      = as.data.frame(loos),
              waics     = as.data.frame(waics),
              loo_plot  = loo_plot,
              waic_plot = waic_plot))
}


#' Scatter plot of the sample data with posterior predictive ellipses.
#'
#' @param model A stanfit returned by rob.cor.mcmc() containing "x_rand"
#'   draws (posterior predictive draws for the variable pair; V1 pairs
#'   with the first variable passed to rob.cor.mcmc(), V2 with the second).
#' @param xaxis,yaxis Unquoted column names of the global `data` frame.
#' @return A ggplot object.
posterior_scatter <- function(model, xaxis, yaxis) {
  x <- deparse(substitute(xaxis))
  y <- deparse(substitute(yaxis))
  x.rand <- as.data.frame(extract(model, c("x_rand"))[[1]])
  # .data[[ ]] resolves the columns inside `data` itself, so the plot no
  # longer relies on attach(data) having been called (the original
  # aes(x = xaxis, ...) only worked through lazy evaluation + attach).
  p <- ggplot(data, aes(x = .data[[x]], y = .data[[y]])) +
    geom_point() +
    stat_ellipse(x.rand, mapping = aes(V2, V1), type = 't', level = .95,
                 fill = "#56B4E9", geom = 'polygon', alpha = .2) +
    stat_ellipse(x.rand, mapping = aes(V2, V1), type = 't', level = .5,
                 fill = "#56B4E9", geom = 'polygon', alpha = .4) +
    theme_bw() +
    labs(caption = "Note. Points show the sample data points; The ellipses show the areas within which 50 and 95% of posterior draws fall.", x = x , y = y) 
  return(p)
}

# Prior sets for the brms regression models.
# "Very weak": wide Student-t(3, 0, 10) prior on the slope coefficients only.
very_weak_prior <- c(
    prior(student_t(3, 0, 10), class = b)
    )
# As above, plus an explicit wide prior on the intercept and a
# half-Cauchy(0, 10) prior on the residual SD.
very_weak_prior_cauchy <- c(
    prior(student_t(3, 0, 10), class = Intercept),
    prior(student_t(3, 0, 10), class = b),
    prior(cauchy(0, 10), class = sigma)
    )

# "Weak": tighter scale (1 instead of 10) on the slope coefficients.
weak_prior <- c(
    prior(student_t(3, 0, 1), class = b)
    )

# Tighter slope priors combined with wide intercept and sigma priors.
weak_prior_cauchy <- c(
    prior(student_t(3, 0, 10), class = Intercept),
    prior(student_t(3, 0, 1), class = b),
    prior(cauchy(0, 10), class = sigma)
    )
# Record package and platform versions for reproducibility (output below).
sessionInfo()
## R version 3.6.3 (2020-02-29)
## Platform: x86_64-apple-darwin15.6.0 (64-bit)
## Running under: macOS Catalina 10.15.7
## 
## Matrix products: default
## BLAS:   /Library/Frameworks/R.framework/Versions/3.6/Resources/lib/libRblas.0.dylib
## LAPACK: /Library/Frameworks/R.framework/Versions/3.6/Resources/lib/libRlapack.dylib
## 
## locale:
## [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
## 
## attached base packages:
## [1] stats     graphics  grDevices utils     datasets  methods   base     
## 
## other attached packages:
##  [1] foreign_0.8-75       gridExtra_2.3        bayesplot_1.7.2     
##  [4] brms_2.13.5          Rcpp_1.0.5           car_3.0-9           
##  [7] carData_3.0-4        mvtnorm_1.1-1        coda_0.19-4         
## [10] rstan_2.21.2         StanHeaders_2.21.0-6 psych_2.0.9         
## [13] forcats_0.5.0        stringr_1.4.0        dplyr_1.0.2         
## [16] purrr_0.3.4          readr_1.3.1          tidyr_1.1.2         
## [19] tibble_3.0.4         ggplot2_3.3.2        tidyverse_1.3.0     
## 
## loaded via a namespace (and not attached):
##   [1] TH.data_1.0-10        colorspace_2.0-0      ellipsis_0.3.1       
##   [4] rio_0.5.16            ggridges_0.5.2        rprojroot_2.0.2      
##   [7] rsconnect_0.8.16      estimability_1.3      markdown_1.1         
##  [10] base64enc_0.1-3       fs_1.5.0              rstudioapi_0.13      
##  [13] DT_0.16               fansi_0.4.1           lubridate_1.7.9      
##  [16] xml2_1.3.2            splines_3.6.3         bridgesampling_1.0-0 
##  [19] codetools_0.2-16      mnormt_1.5-7          knitr_1.30           
##  [22] shinythemes_1.1.2     jsonlite_1.7.2        broom_0.7.0          
##  [25] dbplyr_1.4.4          shiny_1.5.0           compiler_3.6.3       
##  [28] httr_1.4.2            emmeans_1.5.3         backports_1.1.10     
##  [31] assertthat_0.2.1      Matrix_1.2-18         fastmap_1.0.1        
##  [34] cli_2.2.0             later_1.1.0.1         htmltools_0.5.0      
##  [37] prettyunits_1.1.1     tools_3.6.3           igraph_1.2.5         
##  [40] gtable_0.3.0          glue_1.4.2            reshape2_1.4.4       
##  [43] V8_3.2.0              cellranger_1.1.0      vctrs_0.3.5          
##  [46] nlme_3.1-144          crosstalk_1.1.0.1     xfun_0.19            
##  [49] ps_1.5.0              openxlsx_4.1.5        rvest_0.3.6          
##  [52] miniUI_0.1.1.1        mime_0.9              lifecycle_0.2.0      
##  [55] gtools_3.8.2          MASS_7.3-51.5         zoo_1.8-8            
##  [58] scales_1.1.1          colourpicker_1.0      hms_0.5.3            
##  [61] promises_1.1.1        Brobdingnag_1.2-6     sandwich_2.5-1       
##  [64] parallel_3.6.3        inline_0.3.16         shinystan_2.5.0      
##  [67] yaml_2.2.1            curl_4.3              loo_2.3.1            
##  [70] stringi_1.5.3         dygraphs_1.1.1.6      pkgbuild_1.1.0       
##  [73] zip_2.1.1             rlang_0.4.9           pkgconfig_2.0.3      
##  [76] matrixStats_0.57.0    evaluate_0.14         lattice_0.20-38      
##  [79] rstantools_2.1.1.9000 htmlwidgets_1.5.3     processx_3.4.5       
##  [82] tidyselect_1.1.0      here_0.1              plyr_1.8.6           
##  [85] magrittr_2.0.1        R6_2.5.0              generics_0.1.0       
##  [88] multcomp_1.4-14       DBI_1.1.0             pillar_1.4.7         
##  [91] haven_2.3.1           withr_2.3.0           xts_0.12-0           
##  [94] survival_3.2-7        abind_1.4-5           modelr_0.1.8         
##  [97] crayon_1.3.4          rmarkdown_2.3         grid_3.6.3           
## [100] readxl_1.3.1          data.table_1.12.8     blob_1.2.1           
## [103] callr_3.5.1           threejs_0.3.3         reprex_0.3.0         
## [106] digest_0.6.27         xtable_1.8-4          httpuv_1.5.4         
## [109] RcppParallel_5.0.2    stats4_3.6.3          munsell_0.5.0        
## [112] shinyjs_1.1
# Descriptive statistics (psych::describe) for all variables (output below).
describe(data)
##                                   vars  n    mean     sd  median trimmed    mad
## ID                                   1 40  214.40  71.74  211.50  214.47 134.92
## Nativelikeness                       2 40    5.40   1.34    5.58    5.46   1.43
## Comp                                 3 40    4.84   1.59    4.92    4.91   1.82
## Lex30raw                             4 40   45.30  10.53   45.00   45.50  11.12
## Lex30                                5 40   44.21   7.49   43.87   44.12   8.25
## Numberofresp                         6 40  110.25   7.88  112.00  111.28   5.93
## Rawfreq                              7 40    5.92   0.34    5.99    5.92   0.41
## tscore                               8 40    7.49   1.93    7.69    7.48   2.04
## MI                                   9 40    1.82   0.33    1.82    1.82   0.33
## MI2                                 10 40    8.35   0.59    8.45    8.35   0.56
## LogDice                             11 40    2.87   0.34    2.88    2.88   0.37
## Raw_first                           12 40    5.98   0.57    5.90    5.94   0.63
## tscore_first                        13 40    8.63   3.36    8.42    8.49   3.24
## MI_first                            14 40    2.02   0.48    2.04    2.02   0.51
## MI2_first                           15 40    8.72   0.93    8.91    8.69   0.97
## LogDice_first                       16 40    3.08   0.54    3.10    3.07   0.55
## t_cutoff_2                          17 40    0.58   0.09    0.60    0.57   0.08
## MI_cutoff_0                         18 40    0.68   0.08    0.68    0.67   0.07
## MI_cutoff_2                         19 40    0.36   0.07    0.35    0.36   0.07
## MI_cutoff_3                         20 40    0.25   0.06    0.24    0.25   0.05
## LD_cutoff_2                         21 40    0.58   0.08    0.60    0.58   0.08
## tscore.2                            22 40    0.58   0.09    0.60    0.57   0.08
## tscore.10                           23 40    0.26   0.06    0.25    0.25   0.06
## MI.3                                24 40    0.25   0.06    0.24    0.25   0.05
## MI.7                                25 40    0.01   0.01    0.01    0.01   0.01
## Logdicegt7                          26 40    0.02   0.01    0.02    0.02   0.01
## DiversityMTLD                       27 40   37.09  10.21   36.92   36.76  12.83
## FrequencyJACETInfrequentwordratio   28 40    0.08   0.03    0.09    0.08   0.03
## WordCount                           29 40  120.45  39.74  113.50  118.12  34.10
## COCA_spoken_Range_Log_AW            30 40   -0.38   0.05   -0.37   -0.38   0.04
## COCA_spoken_Frequency_Log_AW        31 40    3.16   0.09    3.17    3.17   0.07
## COCA_spoken_Bigram_Frequency_Log    32 40    1.46   0.11    1.47    1.46   0.14
## COCA_spoken_Bigram_Range_Log        33 40   -1.33   0.10   -1.33   -1.33   0.10
## COCA_spoken_Trigram_Frequency_Log   34 40    0.59   0.12    0.56    0.59   0.13
## COCA_spoken_Trigram_Range_Log       35 40   -2.07   0.11   -2.09   -2.07   0.12
## IndexCOCA_spoken_Frequency_Log_AW   36 40    1.64   0.14    1.62    1.64   0.16
## FAC1_1                              37 40    0.00   1.00    0.06    0.00   1.07
## FAC2_1                              38 40    0.00   1.00   -0.21    0.00   1.06
## FAC3_1                              39 40    0.00   1.00    0.10    0.08   0.86
## RawFirst                            40 40    5.98   0.57    5.90    5.94   0.63
## RawSecond                           41 40  396.85 232.65  320.90  379.02 231.12
## RawThird                            42 40  360.47 214.31  339.50  333.08 162.22
## RawFourth                           43 40  324.07 161.08  277.69  304.73  97.66
## T_First                             44 40    8.63   3.36    8.42    8.49   3.24
## T_Second                            45 40    7.74   3.04    7.59    7.66   2.46
## T_Third                             46 40    6.92   2.57    6.91    6.98   2.08
## T_Fourth                            47 40    6.29   3.25    6.87    6.48   2.18
## MI_First1                           48 40    2.02   0.48    2.04    2.02   0.51
## MI_Second                           49 40    1.82   0.48    1.81    1.82   0.56
## MI_Third                            50 40    1.75   0.45    1.67    1.73   0.39
## MI_Fourth                           51 40    1.66   0.57    1.80    1.70   0.63
## Richness                            52 40    0.42   0.12    0.44    0.42   0.12
## MTLD                                53 40   37.09  10.21   36.92   36.76  12.83
## Fluency                             54 40    0.52   0.17    0.51    0.52   0.22
## ArticulationRate                    55 40  109.12  26.62  107.95  108.73  25.60
## FilledPause                         56 40    0.07   0.06    0.05    0.06   0.05
## SilentPause                         57 40    0.60   0.21    0.56    0.59   0.21
## invertedRawFreq                     58 40 -394.23 129.28 -397.70 -386.18 165.60
## invertedFirstRawFreq                59 40 -466.11 294.68 -366.30 -413.67 223.64
##                                        min     max   range  skew kurtosis    se
## ID                                  111.00  318.00  207.00  0.01    -1.19 11.34
## Nativelikeness                        2.00    7.54    5.54 -0.43    -0.69  0.21
## Comp                                  1.38    7.31    5.92 -0.31    -1.01  0.25
## Lex30raw                             22.00   65.00   43.00 -0.09    -0.86  1.67
## Lex30                                30.93   61.91   30.98  0.19    -0.73  1.18
## Numberofresp                         85.00  120.00   35.00 -1.17     1.07  1.25
## Rawfreq                               5.30    6.57    1.27 -0.09    -1.25  0.05
## tscore                                4.10   12.30    8.20  0.07    -0.51  0.30
## MI                                    1.19    2.60    1.41  0.00    -0.60  0.05
## MI2                                   7.17    9.49    2.31 -0.05    -0.87  0.09
## LogDice                               2.16    3.47    1.31 -0.26    -0.85  0.05
## Raw_first                             5.14    7.15    2.01  0.38    -0.82  0.09
## tscore_first                          1.93   17.04   15.11  0.28    -0.46  0.53
## MI_first                              1.16    3.25    2.08  0.12    -0.47  0.08
## MI2_first                             7.10   10.94    3.84  0.17    -0.65  0.15
## LogDice_first                         1.90    4.32    2.42  0.04    -0.53  0.09
## t_cutoff_2                            0.44    0.78    0.34  0.10    -0.74  0.01
## MI_cutoff_0                           0.54    0.85    0.31  0.09    -0.57  0.01
## MI_cutoff_2                           0.26    0.50    0.24  0.47    -0.63  0.01
## MI_cutoff_3                           0.15    0.36    0.22  0.29    -0.55  0.01
## LD_cutoff_2                           0.39    0.71    0.32 -0.42    -0.65  0.01
## tscore.2                              0.44    0.78    0.34  0.10    -0.74  0.01
## tscore.10                             0.14    0.45    0.31  0.54     1.06  0.01
## MI.3                                  0.15    0.36    0.22  0.29    -0.55  0.01
## MI.7                                  0.00    0.03    0.03  0.57    -0.71  0.00
## Logdicegt7                            0.00    0.06    0.06  0.44     0.54  0.00
## DiversityMTLD                        19.20   63.09   43.89  0.28    -0.68  1.61
## FrequencyJACETInfrequentwordratio     0.00    0.16    0.16 -0.19    -0.28  0.01
## WordCount                            57.00  208.00  151.00  0.50    -0.65  6.28
## COCA_spoken_Range_Log_AW             -0.52   -0.26    0.26 -0.46     0.63  0.01
## COCA_spoken_Frequency_Log_AW          2.92    3.36    0.44 -0.47     0.55  0.01
## COCA_spoken_Bigram_Frequency_Log      1.25    1.69    0.44  0.00    -0.96  0.02
## COCA_spoken_Bigram_Range_Log         -1.51   -1.12    0.39  0.00    -0.81  0.02
## COCA_spoken_Trigram_Frequency_Log     0.25    0.87    0.62 -0.12     0.01  0.02
## COCA_spoken_Trigram_Range_Log        -2.39   -1.82    0.57 -0.20    -0.06  0.02
## IndexCOCA_spoken_Frequency_Log_AW     1.41    1.91    0.50  0.23    -1.14  0.02
## FAC1_1                               -1.88    2.04    3.92  0.02    -0.85  0.16
## FAC2_1                               -2.66    2.28    4.95 -0.08    -0.14  0.16
## FAC3_1                               -2.94    2.37    5.31 -0.63     0.85  0.16
## RawFirst                              5.14    7.15    2.01  0.38    -0.82  0.09
## RawSecond                           109.00  932.39  823.39  0.54    -1.03 36.79
## RawThird                             90.48 1091.65 1001.17  1.62     3.30 33.88
## RawFourth                            68.47  813.04  744.58  1.32     1.67 25.47
## T_First                               1.93   17.04   15.11  0.28    -0.46  0.53
## T_Second                              2.04   14.91   12.87  0.20    -0.42  0.48
## T_Third                              -0.73   12.22   12.95 -0.37     0.55  0.41
## T_Fourth                             -2.20   14.30   16.49 -0.40     0.35  0.51
## MI_First1                             1.16    3.25    2.08  0.12    -0.47  0.08
## MI_Second                             0.78    3.05    2.27  0.09    -0.35  0.08
## MI_Third                              0.43    3.04    2.61  0.19     1.28  0.07
## MI_Fourth                             0.34    2.51    2.17 -0.46    -0.72  0.09
## Richness                              0.19    0.64    0.44 -0.11    -1.04  0.02
## MTLD                                 19.20   63.09   43.89  0.28    -0.68  1.61
## Fluency                               0.24    0.83    0.59  0.07    -1.16  0.03
## ArticulationRate                     50.95  176.20  125.25  0.14    -0.26  4.21
## FilledPause                           0.00    0.22    0.22  0.89    -0.44  0.01
## SilentPause                           0.28    1.16    0.88  0.51    -0.35  0.03
## invertedRawFreq                    -712.80 -200.70  512.10 -0.35    -0.85 20.44
## invertedFirstRawFreq              -1278.52 -170.88 1107.64 -1.31     0.90 46.59

## Correlational analysis

## Correlation between collocation knowledge measures

# Robust Bayesian correlation: raw collocation frequency vs. t-score
# (columns come from the attached `data`; summary output below).
freq_tscore = rob.cor.mcmc(cbind(Rawfreq, tscore), iter=5000, warmup=1000, chains=4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.6531716, SD = 0.09721351
## Posterior median and MAD:                  Median = 0.6645686, MAD = 0.09429324
## Rho values with 99% posterior probability: 99% HPDI = [0.368715, 0.8486897]
## Rho values with 95% posterior probability: 95% HPDI = [0.4548916, 0.8233276]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 1
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 6.25e-05
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.9995
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.986375
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.7328125
# Full posterior summary for the Rawfreq--tscore model (output below).
print(freq_tscore)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##             mean se_mean    sd   2.5%    25%    50%    75%  97.5% n_eff Rhat
## mu[1]       5.92    0.00  0.06   5.81   5.88   5.92   5.96   6.03 11532    1
## mu[2]       7.49    0.00  0.32   6.85   7.27   7.49   7.70   8.12 11764    1
## sigma[1]    0.34    0.00  0.04   0.27   0.31   0.34   0.37   0.43 11578    1
## sigma[2]    1.95    0.00  0.24   1.54   1.78   1.93   2.10   2.47 11505    1
## nu         27.95    0.11 14.91   8.16  17.11  24.80  35.60  64.27 18061    1
## rho         0.65    0.00  0.10   0.43   0.59   0.66   0.72   0.81 11318    1
## cov[1,1]    0.12    0.00  0.03   0.07   0.10   0.11   0.13   0.19 11223    1
## cov[1,2]    0.44    0.00  0.14   0.23   0.34   0.42   0.52   0.77  8569    1
## cov[2,1]    0.44    0.00  0.14   0.23   0.34   0.42   0.52   0.77  8569    1
## cov[2,2]    3.86    0.01  0.97   2.39   3.17   3.73   4.42   6.11 11185    1
## x_rand[1]   5.92    0.00  0.36   5.19   5.68   5.92   6.15   6.65 15468    1
## x_rand[2]   7.47    0.02  2.08   3.36   6.14   7.48   8.80  11.62 15662    1
## lp__      -38.81    0.02  1.76 -43.14 -39.73 -38.50 -37.53 -36.35  7449    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:46:22 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Observed points with 50%/95% posterior predictive ellipses.
posterior_scatter(freq_tscore, tscore, Rawfreq)

# Robust Bayesian correlation: raw collocation frequency vs. MI.
freq_MI = rob.cor.mcmc(cbind(Rawfreq, MI), iter=5000, warmup=1000, chains=4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.3227581, SD = 0.1496301
## Posterior median and MAD:                  Median = 0.3300694, MAD = 0.1515278
## Rho values with 99% posterior probability: 99% HPDI = [-0.09185931, 0.6562387]
## Rho values with 95% posterior probability: 95% HPDI = [0.02322832, 0.6030461]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.0216875
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.9783125
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.07175
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.0315625
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.6970625
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.3196875
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0216875
# Full posterior summary for the Rawfreq--MI model (output below).
print(freq_MI)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      5.92    0.00  0.06  5.81  5.88  5.92  5.96  6.03 17074    1
## mu[2]      1.83    0.00  0.06  1.72  1.79  1.83  1.87  1.94 17326    1
## sigma[1]   0.34    0.00  0.04  0.27  0.31  0.34  0.37  0.44 16589    1
## sigma[2]   0.34    0.00  0.04  0.26  0.31  0.33  0.36  0.43 17332    1
## nu        26.53    0.11 14.82  7.22 15.66 23.43 33.89 63.42 19682    1
## rho        0.32    0.00  0.15  0.01  0.22  0.33  0.43  0.59 17947    1
## cov[1,1]   0.12    0.00  0.03  0.07  0.10  0.11  0.13  0.19 15814    1
## cov[1,2]   0.04    0.00  0.02  0.00  0.02  0.04  0.05  0.09 14196    1
## cov[2,1]   0.04    0.00  0.02  0.00  0.02  0.04  0.05  0.09 14196    1
## cov[2,2]   0.12    0.00  0.03  0.07  0.09  0.11  0.13  0.19 16287    1
## x_rand[1]  5.92    0.00  0.37  5.19  5.68  5.92  6.15  6.64 15820    1
## x_rand[2]  1.83    0.00  0.37  1.10  1.60  1.83  2.07  2.56 15140    1
## lp__      20.37    0.02  1.77 16.12 19.45 20.70 21.66 22.81  7463    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:46:38 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Observed points with 50%/95% posterior predictive ellipses.
posterior_scatter(freq_MI, MI, Rawfreq)

# Robust Bayesian correlation: t-score vs. MI.
tscore_MI = rob.cor.mcmc(cbind(tscore, MI), iter=5000, warmup=1000, chains=4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.7891313, SD = 0.06433779
## Posterior median and MAD:                  Median = 0.7972896, MAD = 0.06065465
## Rho values with 99% posterior probability: 99% HPDI = [0.5907299, 0.9168299]
## Rho values with 95% posterior probability: 95% HPDI = [0.6576899, 0.9004247]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 1
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 1
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 1
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.9900625
# Full posterior summary for the tscore--MI model (output below).
print(tscore_MI)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##             mean se_mean    sd   2.5%    25%    50%    75%  97.5% n_eff Rhat
## mu[1]       7.49    0.00  0.32   6.86   7.28   7.49   7.70   8.12  8810    1
## mu[2]       1.83    0.00  0.05   1.72   1.79   1.83   1.86   1.94  8944    1
## sigma[1]    1.93    0.00  0.24   1.52   1.76   1.91   2.07   2.44  8692    1
## sigma[2]    0.33    0.00  0.04   0.26   0.30   0.33   0.36   0.42  8718    1
## nu         26.11    0.12 14.75   7.06  15.38  22.85  33.54  63.05 14827    1
## rho         0.79    0.00  0.06   0.64   0.75   0.80   0.84   0.89 10231    1
## cov[1,1]    3.76    0.01  0.94   2.31   3.10   3.63   4.28   5.96  8487    1
## cov[1,2]    0.51    0.00  0.14   0.29   0.41   0.49   0.59   0.85  7257    1
## cov[2,1]    0.51    0.00  0.14   0.29   0.41   0.49   0.59   0.85  7257    1
## cov[2,2]    0.11    0.00  0.03   0.07   0.09   0.11   0.13   0.18  8654    1
## x_rand[1]   7.47    0.02  2.09   3.36   6.13   7.48   8.82  11.62 15439    1
## x_rand[2]   1.83    0.00  0.36   1.11   1.60   1.83   2.06   2.53 16026    1
## lp__      -29.83    0.02  1.76 -34.12 -30.80 -29.50 -28.53 -27.38  6835    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:46:54 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Observed points with 50%/95% posterior predictive ellipses.
posterior_scatter(tscore_MI, MI, tscore)

## Correlational analysis on Total response measures

## Fluency rating (speech rate)

# Robust Bayesian correlation: fluency (speech rate) vs. raw frequency.
Fluency_RawFreq = rob.cor.mcmc(cbind(Fluency, Rawfreq), iter=5000, warmup=1000, chains=4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.1291422, SD = 0.1605817
## Posterior median and MAD:                  Median = -0.131612, MAD = 0.1633996
## Rho values with 99% posterior probability: 99% HPDI = [-0.5223624, 0.2803069]
## Rho values with 95% posterior probability: 95% HPDI = [-0.4368925, 0.185007]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.7885
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.2115
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.343
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.170875
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.2428125
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.0424375
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 5e-04
print(Fluency_RawFreq)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      0.52    0.00  0.03  0.46  0.50  0.52  0.54  0.58 19339    1
## mu[2]      5.92    0.00  0.06  5.81  5.89  5.93  5.96  6.04 20953    1
## sigma[1]   0.17    0.00  0.02  0.14  0.16  0.17  0.19  0.22 19044    1
## sigma[2]   0.35    0.00  0.04  0.27  0.32  0.34  0.37  0.44 16911    1
## nu        29.25    0.10 15.07  9.01 18.29 26.21 36.85 66.50 21371    1
## rho       -0.13    0.00  0.16 -0.43 -0.24 -0.13 -0.02  0.19 19363    1
## cov[1,1]   0.03    0.00  0.01  0.02  0.02  0.03  0.03  0.05 17842    1
## cov[1,2]  -0.01    0.00  0.01 -0.03 -0.01 -0.01  0.00  0.01 15475    1
## cov[2,1]  -0.01    0.00  0.01 -0.03 -0.01 -0.01  0.00  0.01 15475    1
## cov[2,2]   0.12    0.00  0.03  0.07  0.10  0.12  0.14  0.19 15459    1
## x_rand[1]  0.52    0.00  0.19  0.16  0.40  0.52  0.64  0.89 15994    1
## x_rand[2]  5.93    0.00  0.37  5.19  5.69  5.93  6.16  6.66 16004    1
## lp__      45.14    0.02  1.77 40.88 44.20 45.47 46.43 47.55  7650    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:47:07 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
posterior_scatter(Fluency_RawFreq, Rawfreq, Fluency)

# Robust Bayesian correlation: Fluency vs. tscore (collocational t-score),
# Stan "robust_correlation" model via rob.cor.mcmc(); 4 chains x 5000
# iterations (1000 warmup). "##" lines are captured output from a prior run.
# NOTE(review): the "(probability of positive/negative direction)" labels in
# the captured output appear swapped relative to their inequalities -- verify
# the reporting code in rob.cor.mcmc_additional_criteria.R.
Fluency_tscore <- rob.cor.mcmc(cbind(Fluency, tscore), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.09449891, SD = 0.1618102
## Posterior median and MAD:                  Median = -0.09772779, MAD = 0.1648836
## Rho values with 99% posterior probability: 99% HPDI = [-0.5091974, 0.301825]
## Rho values with 95% posterior probability: 95% HPDI = [-0.4049108, 0.2224573]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.718125
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.281875
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.3866875
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.1983125
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.191625
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.0265625
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.000125
# Full posterior summary (all model parameters, n_eff, Rhat).
print(Fluency_tscore)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##             mean se_mean    sd   2.5%    25%    50%    75%  97.5% n_eff Rhat
## mu[1]       0.52    0.00  0.03   0.46   0.50   0.52   0.54   0.57 20013    1
## mu[2]       7.48    0.00  0.32   6.84   7.26   7.48   7.69   8.11 18914    1
## sigma[1]    0.17    0.00  0.02   0.14   0.16   0.17   0.19   0.22 18159    1
## sigma[2]    1.96    0.00  0.25   1.53   1.78   1.94   2.11   2.51 17674    1
## nu         27.71    0.10 14.82   8.04  16.97  24.69  35.14  65.20 21894    1
## rho        -0.09    0.00  0.16  -0.40  -0.21  -0.10   0.02   0.23 17678    1
## cov[1,1]    0.03    0.00  0.01   0.02   0.02   0.03   0.03   0.05 17024    1
## cov[1,2]   -0.03    0.00  0.06  -0.16  -0.07  -0.03   0.01   0.08 14105    1
## cov[2,1]   -0.03    0.00  0.06  -0.16  -0.07  -0.03   0.01   0.08 14105    1
## cov[2,2]    3.90    0.01  1.03   2.34   3.18   3.75   4.44   6.31 16431    1
## x_rand[1]   0.52    0.00  0.19   0.15   0.40   0.52   0.64   0.88 15949    1
## x_rand[2]   7.49    0.02  2.09   3.39   6.14   7.50   8.84  11.62 15643    1
## lp__      -23.00    0.02  1.78 -27.38 -23.95 -22.65 -21.68 -20.56  7105    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:47:18 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatter of tscore vs. Fluency with the fitted posterior (helper function).
posterior_scatter(Fluency_tscore, tscore, Fluency)

# Robust Bayesian correlation: Fluency vs. MI (mutual information), Stan
# "robust_correlation" model via rob.cor.mcmc(); 4 chains x 5000 iterations
# (1000 warmup). "##" lines are captured output from a prior run.
# NOTE(review): the "(probability of positive/negative direction)" labels in
# the captured output appear swapped relative to their inequalities -- verify
# the reporting code in rob.cor.mcmc_additional_criteria.R.
Fluency_MI <- rob.cor.mcmc(cbind(Fluency, MI), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.08546026, SD = 0.1631664
## Posterior median and MAD:                  Median = 0.08865775, MAD = 0.1652713
## Rho values with 99% posterior probability: 99% HPDI = [-0.3349234, 0.4725509]
## Rho values with 95% posterior probability: 95% HPDI = [-0.2305266, 0.3998928]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.2994375
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.7005625
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.391875
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.206875
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.183375
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.022625
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0001875
# Full posterior summary (all model parameters, n_eff, Rhat).
print(Fluency_MI)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      0.52    0.00  0.03  0.46  0.50  0.52  0.54  0.58 19712    1
## mu[2]      1.82    0.00  0.06  1.71  1.79  1.82  1.86  1.93 19224    1
## sigma[1]   0.17    0.00  0.02  0.14  0.16  0.17  0.18  0.22 17919    1
## sigma[2]   0.34    0.00  0.04  0.26  0.31  0.33  0.36  0.43 19153    1
## nu        27.14    0.10 14.62  7.62 16.43 24.18 34.72 63.39 20759    1
## rho        0.09    0.00  0.16 -0.24 -0.02  0.09  0.20  0.39 17671    1
## cov[1,1]   0.03    0.00  0.01  0.02  0.02  0.03  0.03  0.05 16610    1
## cov[1,2]   0.01    0.00  0.01 -0.02  0.00  0.00  0.01  0.03 14351    1
## cov[2,1]   0.01    0.00  0.01 -0.02  0.00  0.00  0.01  0.03 14351    1
## cov[2,2]   0.12    0.00  0.03  0.07  0.09  0.11  0.13  0.19 18262    1
## x_rand[1]  0.52    0.00  0.18  0.15  0.40  0.52  0.64  0.89 15801    1
## x_rand[2]  1.82    0.00  0.36  1.11  1.60  1.82  2.05  2.55 16122    1
## lp__      45.37    0.02  1.76 41.04 44.43 45.70 46.66 47.81  7524    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:47:29 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatter of MI vs. Fluency with the fitted posterior (helper function).
posterior_scatter(Fluency_MI, MI, Fluency)

Articulation rate

# Robust Bayesian correlation: ArticulationRate vs. Rawfreq (log raw
# frequency), Stan "robust_correlation" model via rob.cor.mcmc(); 4 chains x
# 5000 iterations (1000 warmup). "##" lines are captured output.
# NOTE(review): the "(probability of positive/negative direction)" labels in
# the captured output appear swapped relative to their inequalities -- verify
# the reporting code in rob.cor.mcmc_additional_criteria.R.
ArticulationRate_RawFreq <- rob.cor.mcmc(cbind(ArticulationRate, Rawfreq), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.0566811, SD = 0.163747
## Posterior median and MAD:                  Median = -0.05736719, MAD = 0.1684909
## Rho values with 99% posterior probability: 99% HPDI = [-0.4487793, 0.3658917]
## Rho values with 95% posterior probability: 95% HPDI = [-0.36673, 0.2657182]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.6335
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.3665
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.4251875
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.21975
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.15525
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.01625
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0001875
# Full posterior summary (all model parameters, n_eff, Rhat).
print(ArticulationRate_RawFreq)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##              mean se_mean     sd    2.5%     25%     50%     75%   97.5% n_eff
## mu[1]      109.01    0.03   4.46  100.34  106.05  109.01  111.98  117.83 19402
## mu[2]        5.92    0.00   0.06    5.81    5.88    5.92    5.96    6.03 19364
## sigma[1]    26.93    0.03   3.43   21.06   24.52   26.62   29.02   34.48 18431
## sigma[2]     0.35    0.00   0.04    0.27    0.32    0.34    0.37    0.44 18747
## nu          27.64    0.10  14.85    8.00   16.73   24.54   35.27   64.40 21587
## rho         -0.06    0.00   0.16   -0.37   -0.17   -0.06    0.06    0.27 18464
## cov[1,1]   736.89    1.45 191.96  443.63  601.14  708.72  842.40 1188.60 17623
## cov[1,2]    -0.54    0.01   1.64   -3.86   -1.55   -0.50    0.50    2.68 15234
## cov[2,1]    -0.54    0.01   1.64   -3.86   -1.55   -0.50    0.50    2.68 15234
## cov[2,2]     0.12    0.00   0.03    0.07    0.10    0.12    0.14    0.20 17491
## x_rand[1]  108.94    0.23  28.90   51.07   90.30  109.16  127.75  165.45 16093
## x_rand[2]    5.92    0.00   0.37    5.18    5.68    5.92    6.16    6.64 16266
## lp__      -152.63    0.02   1.79 -157.03 -153.57 -152.30 -151.32 -150.19  7226
##           Rhat
## mu[1]        1
## mu[2]        1
## sigma[1]     1
## sigma[2]     1
## nu           1
## rho          1
## cov[1,1]     1
## cov[1,2]     1
## cov[2,1]     1
## cov[2,2]     1
## x_rand[1]    1
## x_rand[2]    1
## lp__         1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:47:41 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatter of Rawfreq vs. ArticulationRate with the fitted posterior.
posterior_scatter(ArticulationRate_RawFreq, Rawfreq, ArticulationRate)

# Robust Bayesian correlation: ArticulationRate vs. tscore, Stan
# "robust_correlation" model via rob.cor.mcmc(); 4 chains x 5000 iterations
# (1000 warmup). "##" lines are captured output from a prior run.
# NOTE(review): the "(probability of positive/negative direction)" labels in
# the captured output appear swapped relative to their inequalities -- verify
# the reporting code in rob.cor.mcmc_additional_criteria.R.
ArticulationRate_tscore <- rob.cor.mcmc(cbind(ArticulationRate, tscore), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.01932372, SD = 0.1621637
## Posterior median and MAD:                  Median = 0.02062794, MAD = 0.1656464
## Rho values with 99% posterior probability: 99% HPDI = [-0.3964707, 0.4159959]
## Rho values with 95% posterior probability: 95% HPDI = [-0.3064207, 0.3215319]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.448875
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.551125
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.446625
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.234875
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.1275625
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.012
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0
# Full posterior summary (all model parameters, n_eff, Rhat).
print(ArticulationRate_tscore)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##              mean se_mean     sd    2.5%     25%     50%     75%   97.5% n_eff
## mu[1]      108.74    0.03   4.43   99.86  105.85  108.76  111.68  117.44 19968
## mu[2]        7.48    0.00   0.33    6.83    7.26    7.48    7.69    8.11 21227
## sigma[1]    26.87    0.02   3.48   20.96   24.44   26.57   28.98   34.46 20411
## sigma[2]     1.96    0.00   0.25    1.53    1.78    1.94    2.11    2.50 19638
## nu          26.00    0.10  14.48    7.13   15.50   22.90   33.29   62.40 21202
## rho          0.02    0.00   0.16   -0.30   -0.09    0.02    0.13    0.33 19967
## cov[1,1]   734.16    1.40 194.78  439.23  597.53  706.10  840.05 1187.69 19232
## cov[1,2]     1.06    0.07   9.16  -17.26   -4.57    1.01    6.74   19.37 16362
## cov[2,1]     1.06    0.07   9.16  -17.26   -4.57    1.01    6.74   19.37 16362
## cov[2,2]     3.90    0.01   1.01    2.36    3.19    3.75    4.45    6.26 18130
## x_rand[1]  108.33    0.23  29.08   50.36   89.71  108.28  126.85  166.20 15785
## x_rand[2]    7.47    0.02   2.12    3.24    6.11    7.46    8.82   11.68 15883
## lp__      -220.65    0.02   1.78 -224.95 -221.59 -220.31 -219.35 -218.21  7256
##           Rhat
## mu[1]        1
## mu[2]        1
## sigma[1]     1
## sigma[2]     1
## nu           1
## rho          1
## cov[1,1]     1
## cov[1,2]     1
## cov[2,1]     1
## cov[2,2]     1
## x_rand[1]    1
## x_rand[2]    1
## lp__         1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:47:54 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatter of tscore vs. ArticulationRate with the fitted posterior.
posterior_scatter(ArticulationRate_tscore, tscore, ArticulationRate)

# Robust Bayesian correlation: ArticulationRate vs. MI, Stan
# "robust_correlation" model via rob.cor.mcmc(); 4 chains x 5000 iterations
# (1000 warmup). "##" lines are captured output from a prior run.
# NOTE(review): the "(probability of positive/negative direction)" labels in
# the captured output appear swapped relative to their inequalities -- verify
# the reporting code in rob.cor.mcmc_additional_criteria.R.
ArticulationRate_MI <- rob.cor.mcmc(cbind(ArticulationRate, MI), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.1420418, SD = 0.1622194
## Posterior median and MAD:                  Median = 0.1456006, MAD = 0.1620058
## Rho values with 99% posterior probability: 99% HPDI = [-0.2782159, 0.5273764]
## Rho values with 95% posterior probability: 95% HPDI = [-0.1733782, 0.45813]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.1905
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.8095
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.3169375
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.1559375
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.2691875
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.054875
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 5e-04
# Full posterior summary (all model parameters, n_eff, Rhat).
print(ArticulationRate_MI)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##              mean se_mean     sd    2.5%     25%     50%     75%   97.5% n_eff
## mu[1]      109.03    0.03   4.46  100.27  106.01  109.07  112.01  117.77 22174
## mu[2]        1.82    0.00   0.06    1.71    1.79    1.82    1.86    1.93 20274
## sigma[1]    26.81    0.03   3.43   20.88   24.43   26.51   28.88   34.48 17741
## sigma[2]     0.34    0.00   0.04    0.26    0.31    0.33    0.36    0.44 19487
## nu          25.45    0.10  14.46    6.85   14.92   22.34   32.70   61.45 21151
## rho          0.14    0.00   0.16   -0.19    0.04    0.15    0.25    0.45 19938
## cov[1,1]   730.53    1.47 190.79  436.08  596.70  702.62  833.83 1189.16 16924
## cov[1,2]     1.32    0.01   1.63   -1.78    0.30    1.24    2.26    4.72 15459
## cov[2,1]     1.32    0.01   1.63   -1.78    0.30    1.24    2.26    4.72 15459
## cov[2,2]     0.12    0.00   0.03    0.07    0.10    0.11    0.13    0.19 18187
## x_rand[1]  109.30    0.23  29.23   50.97   90.59  109.22  128.04  167.56 15722
## x_rand[2]    1.82    0.00   0.36    1.10    1.59    1.82    2.05    2.54 15386
## lp__      -151.89    0.02   1.80 -156.31 -152.84 -151.57 -150.57 -149.41  6096
##           Rhat
## mu[1]        1
## mu[2]        1
## sigma[1]     1
## sigma[2]     1
## nu           1
## rho          1
## cov[1,1]     1
## cov[1,2]     1
## cov[2,1]     1
## cov[2,2]     1
## x_rand[1]    1
## x_rand[2]    1
## lp__         1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:48:07 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatter of MI vs. ArticulationRate with the fitted posterior.
posterior_scatter(ArticulationRate_MI, MI, ArticulationRate)

Silent pause ratio

# Robust Bayesian correlation: SilentPause (silent pause ratio) vs. Rawfreq,
# Stan "robust_correlation" model via rob.cor.mcmc(); 4 chains x 5000
# iterations (1000 warmup). "##" lines are captured output from a prior run.
# NOTE(review): the "(probability of positive/negative direction)" labels in
# the captured output appear swapped relative to their inequalities -- verify
# the reporting code in rob.cor.mcmc_additional_criteria.R.
SilentPause_RawFreq <- rob.cor.mcmc(cbind(SilentPause, Rawfreq), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.147073, SD = 0.1621543
## Posterior median and MAD:                  Median = 0.1497355, MAD = 0.1656522
## Rho values with 99% posterior probability: 99% HPDI = [-0.2588787, 0.5319099]
## Rho values with 95% posterior probability: 95% HPDI = [-0.1560767, 0.4704009]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.1866875
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.8133125
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.314375
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.15825
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.2801875
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.0559375
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.001125
# Full posterior summary (all model parameters, n_eff, Rhat).
print(SilentPause_RawFreq)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      0.59    0.00  0.04  0.53  0.57  0.59  0.62  0.66 19662    1
## mu[2]      5.92    0.00  0.06  5.81  5.89  5.92  5.96  6.04 20221    1
## sigma[1]   0.21    0.00  0.03  0.17  0.19  0.21  0.23  0.27 18561    1
## sigma[2]   0.34    0.00  0.04  0.27  0.31  0.34  0.37  0.44 18666    1
## nu        27.09    0.10 14.57  7.76 16.60 24.05 34.21 62.93 21738    1
## rho        0.15    0.00  0.16 -0.18  0.04  0.15  0.26  0.45 20786    1
## cov[1,1]   0.05    0.00  0.01  0.03  0.04  0.04  0.05  0.08 17256    1
## cov[1,2]   0.01    0.00  0.01 -0.01  0.00  0.01  0.02  0.04 16899    1
## cov[2,1]   0.01    0.00  0.01 -0.01  0.00  0.01  0.02  0.04 16899    1
## cov[2,2]   0.12    0.00  0.03  0.07  0.10  0.12  0.14  0.19 17478    1
## x_rand[1]  0.60    0.00  0.23  0.15  0.45  0.60  0.74  1.05 15521    1
## x_rand[2]  5.93    0.00  0.37  5.19  5.69  5.92  6.16  6.67 16150    1
## lp__      36.37    0.02  1.80 31.97 35.43 36.71 37.68 38.83  6780    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:48:18 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatter of Rawfreq vs. SilentPause with the fitted posterior.
posterior_scatter(SilentPause_RawFreq, Rawfreq, SilentPause)

# Robust Bayesian correlation: SilentPause vs. tscore, Stan
# "robust_correlation" model via rob.cor.mcmc(); 4 chains x 5000 iterations
# (1000 warmup). "##" lines are captured output from a prior run.
# NOTE(review): the "(probability of positive/negative direction)" labels in
# the captured output appear swapped relative to their inequalities -- verify
# the reporting code in rob.cor.mcmc_additional_criteria.R.
SilentPause_tscore <- rob.cor.mcmc(cbind(SilentPause, tscore), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.09399211, SD = 0.161006
## Posterior median and MAD:                  Median = 0.0958358, MAD = 0.162334
## Rho values with 99% posterior probability: 99% HPDI = [-0.3087875, 0.4999733]
## Rho values with 95% posterior probability: 95% HPDI = [-0.2286147, 0.4017997]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.2794375
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.7205625
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.390625
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.200375
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.18375
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.02775
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0003125
# Full posterior summary (all model parameters, n_eff, Rhat).
print(SilentPause_tscore)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##             mean se_mean    sd   2.5%    25%    50%    75%  97.5% n_eff Rhat
## mu[1]       0.60    0.00  0.04   0.53   0.57   0.60   0.62   0.67 19496    1
## mu[2]       7.48    0.00  0.32   6.85   7.27   7.48   7.70   8.13 18197    1
## sigma[1]    0.21    0.00  0.03   0.17   0.20   0.21   0.23   0.28 17023    1
## sigma[2]    1.96    0.00  0.25   1.54   1.79   1.94   2.12   2.52 17735    1
## nu         26.87    0.10 14.72   7.62  16.09  23.64  34.53  63.05 20069    1
## rho         0.09    0.00  0.16  -0.23  -0.01   0.10   0.21   0.40 16842    1
## cov[1,1]    0.05    0.00  0.01   0.03   0.04   0.04   0.05   0.08 16068    1
## cov[1,2]    0.04    0.00  0.07  -0.10  -0.01   0.04   0.09   0.20 13928    1
## cov[2,1]    0.04    0.00  0.07  -0.10  -0.01   0.04   0.09   0.20 13928    1
## cov[2,2]    3.92    0.01  1.02   2.37   3.19   3.76   4.47   6.35 16559    1
## x_rand[1]   0.60    0.00  0.23   0.13   0.45   0.60   0.74   1.06 16109    1
## x_rand[2]   7.48    0.02  2.09   3.32   6.15   7.50   8.83  11.62 16034    1
## lp__      -31.95    0.02  1.79 -36.30 -32.91 -31.61 -30.62 -29.49  7562    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:48:29 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatter of tscore vs. SilentPause with the fitted posterior.
posterior_scatter(SilentPause_tscore, tscore, SilentPause)

# Robust Bayesian correlation: SilentPause vs. MI, Stan "robust_correlation"
# model via rob.cor.mcmc(); 4 chains x 5000 iterations (1000 warmup).
# "##" lines are captured output from a prior run.
# NOTE(review): the "(probability of positive/negative direction)" labels in
# the captured output appear swapped relative to their inequalities -- verify
# the reporting code in rob.cor.mcmc_additional_criteria.R.
SilentPause_MI <- rob.cor.mcmc(cbind(SilentPause, MI), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.01559714, SD = 0.165624
## Posterior median and MAD:                  Median = 0.01651298, MAD = 0.1665637
## Rho values with 99% posterior probability: 99% HPDI = [-0.4152384, 0.4319221]
## Rho values with 95% posterior probability: 95% HPDI = [-0.3132122, 0.3331659]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.462875
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.537125
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.4506875
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.2324375
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.135875
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.01575
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0001875
# Full posterior summary (all model parameters, n_eff, Rhat).
print(SilentPause_MI)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      0.59    0.00  0.03  0.53  0.57  0.59  0.62  0.66 21625    1
## mu[2]      1.82    0.00  0.06  1.71  1.79  1.82  1.86  1.93 23504    1
## sigma[1]   0.21    0.00  0.03  0.16  0.19  0.21  0.23  0.27 19376    1
## sigma[2]   0.34    0.00  0.04  0.26  0.31  0.33  0.36  0.43 19070    1
## nu        24.39    0.10 14.16  6.23 14.06 21.37 31.38 59.96 21045    1
## rho        0.02    0.00  0.17 -0.31 -0.10  0.02  0.13  0.34 20634    1
## cov[1,1]   0.05    0.00  0.01  0.03  0.04  0.04  0.05  0.07 18126    1
## cov[1,2]   0.00    0.00  0.01 -0.02 -0.01  0.00  0.01  0.03 16359    1
## cov[2,1]   0.00    0.00  0.01 -0.02 -0.01  0.00  0.01  0.03 16359    1
## cov[2,2]   0.11    0.00  0.03  0.07  0.09  0.11  0.13  0.18 18023    1
## x_rand[1]  0.60    0.00  0.23  0.14  0.45  0.59  0.74  1.05 16097    1
## x_rand[2]  1.82    0.00  0.36  1.11  1.59  1.82  2.05  2.55 16116    1
## lp__      36.48    0.02  1.78 32.16 35.53 36.80 37.79 38.93  6802    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:48:40 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatter of MI vs. SilentPause with the fitted posterior.
posterior_scatter(SilentPause_MI, MI, SilentPause)

Filled pause ratio

# Robust Bayesian correlation: FilledPause (filled pause ratio) vs. Rawfreq,
# Stan "robust_correlation" model via rob.cor.mcmc(); 4 chains x 5000
# iterations (1000 warmup). "##" lines are captured output from a prior run.
# NOTE(review): the "(probability of positive/negative direction)" labels in
# the captured output appear swapped relative to their inequalities -- verify
# the reporting code in rob.cor.mcmc_additional_criteria.R.
FilledPause_RawFreq <- rob.cor.mcmc(cbind(FilledPause, Rawfreq), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.3254999, SD = 0.1560133
## Posterior median and MAD:                  Median = -0.3326866, MAD = 0.1559201
## Rho values with 99% posterior probability: 99% HPDI = [-0.680629, 0.1036161]
## Rho values with 95% posterior probability: 95% HPDI = [-0.6327542, -0.02885761]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.9748125
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.0251875
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.07475
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.0325
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.6996875
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.3305
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0283125
# Full posterior summary (all model parameters, n_eff, Rhat).
print(FilledPause_RawFreq)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      0.07    0.00  0.01  0.05  0.06  0.07  0.07  0.09 16004    1
## mu[2]      5.93    0.00  0.06  5.81  5.89  5.93  5.96  6.04 16908    1
## sigma[1]   0.06    0.00  0.01  0.05  0.05  0.06  0.06  0.08 15967    1
## sigma[2]   0.34    0.00  0.04  0.27  0.31  0.34  0.37  0.44 16632    1
## nu        22.35    0.10 13.82  5.27 12.28 19.11 29.14 57.53 18424    1
## rho       -0.33    0.00  0.16 -0.61 -0.44 -0.33 -0.22  0.00 17737    1
## cov[1,1]   0.00    0.00  0.00  0.00  0.00  0.00  0.00  0.01 15572    1
## cov[1,2]  -0.01    0.00  0.00 -0.02 -0.01 -0.01  0.00  0.00 13741    1
## cov[2,1]  -0.01    0.00  0.00 -0.02 -0.01 -0.01  0.00  0.00 13741    1
## cov[2,2]   0.12    0.00  0.03  0.07  0.10  0.11  0.13  0.19 15927    1
## x_rand[1]  0.07    0.00  0.07 -0.06  0.03  0.07  0.11  0.20 15851    1
## x_rand[2]  5.93    0.00  0.38  5.18  5.69  5.93  6.16  6.68 15835    1
## lp__      86.86    0.02  1.77 82.58 85.92 87.20 88.16 89.31  7261    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:48:52 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatter of Rawfreq vs. FilledPause with the fitted posterior.
posterior_scatter(FilledPause_RawFreq, Rawfreq, FilledPause)

# Robust Bayesian correlation: FilledPause vs. tscore, Stan
# "robust_correlation" model via rob.cor.mcmc(); 4 chains x 5000 iterations
# (1000 warmup). "##" lines are captured output from a prior run.
# NOTE(review): the "(probability of positive/negative direction)" labels in
# the captured output appear swapped relative to their inequalities -- verify
# the reporting code in rob.cor.mcmc_additional_criteria.R.
FilledPause_tscore <- rob.cor.mcmc(cbind(FilledPause, tscore), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.2819368, SD = 0.1518944
## Posterior median and MAD:                  Median = -0.2884884, MAD = 0.1537399
## Rho values with 99% posterior probability: 99% HPDI = [-0.6306421, 0.1248719]
## Rho values with 95% posterior probability: 95% HPDI = [-0.5737772, 0.01302256]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.96025
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.03975
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.113875
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.054375
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.5998125
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.2294375
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0091875
# Full posterior summary (all model parameters, n_eff, Rhat).
print(FilledPause_tscore)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      0.07    0.00  0.01  0.05  0.06  0.07  0.08  0.09 15902    1
## mu[2]      7.52    0.00  0.33  6.88  7.30  7.52  7.74  8.17 17822    1
## sigma[1]   0.06    0.00  0.01  0.05  0.06  0.06  0.07  0.08 16588    1
## sigma[2]   1.94    0.00  0.25  1.52  1.77  1.92  2.09  2.50 17555    1
## nu        25.21    0.11 14.54  6.43 14.59 22.04 32.68 61.38 19026    1
## rho       -0.28    0.00  0.15 -0.56 -0.39 -0.29 -0.18  0.03 17314    1
## cov[1,1]   0.00    0.00  0.00  0.00  0.00  0.00  0.00  0.01 15658    1
## cov[1,2]  -0.03    0.00  0.02 -0.08 -0.05 -0.03 -0.02  0.00 14650    1
## cov[2,1]  -0.03    0.00  0.02 -0.08 -0.05 -0.03 -0.02  0.00 14650    1
## cov[2,2]   3.84    0.01  1.00  2.30  3.14  3.70  4.39  6.25 16578    1
## x_rand[1]  0.07    0.00  0.07 -0.06  0.03  0.07  0.11  0.20 15815    1
## x_rand[2]  7.53    0.02  2.10  3.35  6.19  7.55  8.88 11.67 15153    1
## lp__      18.09    0.02  1.79 13.79 17.14 18.43 19.41 20.57  7204    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:49:05 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatter of tscore vs. FilledPause with the fitted posterior.
posterior_scatter(FilledPause_tscore, tscore, FilledPause)

FilledPause_MI = rob.cor.mcmc(cbind(FilledPause, MI), iter=5000, warmup=1000, chains=4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.121326, SD = 0.1609757
## Posterior median and MAD:                  Median = -0.1249063, MAD = 0.1632253
## Rho values with 99% posterior probability: 99% HPDI = [-0.5025058, 0.2985371]
## Rho values with 95% posterior probability: 95% HPDI = [-0.4418612, 0.1798861]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.7746875
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.2253125
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.350875
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.1751875
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.230875
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.0386875
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0005625
print(FilledPause_MI)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      0.07    0.00  0.01  0.05  0.06  0.07  0.08  0.09 20815    1
## mu[2]      1.83    0.00  0.06  1.71  1.79  1.83  1.86  1.94 20411    1
## sigma[1]   0.06    0.00  0.01  0.05  0.06  0.06  0.07  0.08 17743    1
## sigma[2]   0.34    0.00  0.04  0.26  0.31  0.33  0.36  0.43 18631    1
## nu        25.34    0.10 14.66  6.40 14.67 22.15 32.70 61.91 20283    1
## rho       -0.12    0.00  0.16 -0.43 -0.23 -0.12 -0.01  0.21 20049    1
## cov[1,1]   0.00    0.00  0.00  0.00  0.00  0.00  0.00  0.01 17162    1
## cov[1,2]   0.00    0.00  0.00 -0.01  0.00  0.00  0.00  0.00 16596    1
## cov[2,1]   0.00    0.00  0.00 -0.01  0.00  0.00  0.00  0.00 16596    1
## cov[2,2]   0.12    0.00  0.03  0.07  0.09  0.11  0.13  0.19 17715    1
## x_rand[1]  0.07    0.00  0.07 -0.06  0.03  0.07  0.11  0.20 16190    1
## x_rand[2]  1.83    0.00  0.37  1.10  1.60  1.83  2.06  2.55 16219    1
## lp__      85.22    0.02  1.77 80.87 84.26 85.54 86.52 87.67  6868    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:49:17 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot; presumably (fit, x-variable, y-variable) — project helper
# defined in rob.cor.mcmc_additional_criteria.R; confirm argument order there.
posterior_scatter(FilledPause_MI, MI, FilledPause)

# Lexis ----

# Richness rating ----

# Robust (Student-t) Bayesian correlation between Richness rating and log raw frequency;
# 4 chains x 5000 iterations (1000 warmup) = 16000 post-warmup draws.
# Use `<-` for assignment per tidyverse style.
Richness_RawFreq <- rob.cor.mcmc(cbind(Richness, Rawfreq), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.1773432, SD = 0.159801
## Posterior median and MAD:                  Median = -0.1815353, MAD = 0.1629681
## Rho values with 99% posterior probability: 99% HPDI = [-0.5768741, 0.22718]
## Rho values with 95% posterior probability: 95% HPDI = [-0.4803125, 0.1355636]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.862375
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.137625
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.26675
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.132125
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.3406875
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.077625
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0016875
# Full rstan summary of the fit: posterior quantiles plus n_eff / Rhat convergence diagnostics
print(Richness_RawFreq)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      0.42    0.00  0.02  0.38  0.40  0.42  0.43  0.46 18164    1
## mu[2]      5.93    0.00  0.06  5.81  5.89  5.93  5.96  6.04 19969    1
## sigma[1]   0.13    0.00  0.02  0.10  0.12  0.13  0.14  0.16 18318    1
## sigma[2]   0.35    0.00  0.04  0.27  0.32  0.34  0.37  0.44 20251    1
## nu        29.52    0.10 15.32  8.98 18.39 26.48 37.38 67.45 21739    1
## rho       -0.18    0.00  0.16 -0.48 -0.29 -0.18 -0.07  0.14 18185    1
## cov[1,1]   0.02    0.00  0.00  0.01  0.01  0.02  0.02  0.03 17240    1
## cov[1,2]  -0.01    0.00  0.01 -0.03 -0.01 -0.01  0.00  0.01 14980    1
## cov[2,1]  -0.01    0.00  0.01 -0.03 -0.01 -0.01  0.00  0.01 14980    1
## cov[2,2]   0.12    0.00  0.03  0.07  0.10  0.12  0.14  0.19 18949    1
## x_rand[1]  0.42    0.00  0.14  0.15  0.33  0.42  0.50  0.69 16068    1
## x_rand[2]  5.93    0.00  0.37  5.19  5.69  5.93  6.16  6.64 16235    1
## lp__      57.47    0.02  1.79 53.05 56.52 57.80 58.78 59.94  7489    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:49:28 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot; presumably (fit, x-variable, y-variable) — project helper
# defined in rob.cor.mcmc_additional_criteria.R; confirm argument order there.
posterior_scatter(Richness_RawFreq, Rawfreq, Richness)

# Robust (Student-t) Bayesian correlation between Richness rating and collocation t-score;
# 4 chains x 5000 iterations (1000 warmup) = 16000 post-warmup draws.
# Use `<-` for assignment per tidyverse style.
Richness_tscore <- rob.cor.mcmc(cbind(Richness, tscore), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.03770546, SD = 0.164284
## Posterior median and MAD:                  Median = -0.0379153, MAD = 0.1680372
## Rho values with 99% posterior probability: 99% HPDI = [-0.4439338, 0.3782282]
## Rho values with 95% posterior probability: 95% HPDI = [-0.3642209, 0.2739932]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.5898125
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.4101875
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.4390625
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.228875
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.1385
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.01625
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 6.25e-05
# Full rstan summary of the fit: posterior quantiles plus n_eff / Rhat convergence diagnostics
print(Richness_tscore)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##             mean se_mean    sd   2.5%    25%    50%   75% 97.5% n_eff Rhat
## mu[1]       0.42    0.00  0.02   0.38   0.40   0.42  0.43  0.46 18902    1
## mu[2]       7.49    0.00  0.32   6.85   7.27   7.49  7.70  8.11 20029    1
## sigma[1]    0.13    0.00  0.02   0.10   0.12   0.12  0.14  0.16 18116    1
## sigma[2]    1.96    0.00  0.25   1.54   1.79   1.94  2.11  2.52 18753    1
## nu         28.24    0.10 15.15   8.16  17.21  25.11 36.02 65.76 21564    1
## rho        -0.04    0.00  0.16  -0.35  -0.15  -0.04  0.07  0.29 18947    1
## cov[1,1]    0.02    0.00  0.00   0.01   0.01   0.02  0.02  0.03 17063    1
## cov[1,2]   -0.01    0.00  0.04  -0.10  -0.04  -0.01  0.02  0.08 15297    1
## cov[2,1]   -0.01    0.00  0.04  -0.10  -0.04  -0.01  0.02  0.08 15297    1
## cov[2,2]    3.91    0.01  1.00   2.38   3.21   3.77  4.44  6.33 17708    1
## x_rand[1]   0.42    0.00  0.14   0.14   0.33   0.42  0.50  0.68 15434    1
## x_rand[2]   7.50    0.02  2.10   3.28   6.19   7.50  8.84 11.66 16040    1
## lp__      -11.13    0.02  1.78 -15.46 -12.07 -10.78 -9.84 -8.68  7354    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:49:39 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot; presumably (fit, x-variable, y-variable) — project helper
# defined in rob.cor.mcmc_additional_criteria.R; confirm argument order there.
posterior_scatter(Richness_tscore, tscore, Richness)

# Robust (Student-t) Bayesian correlation between Richness rating and collocation MI;
# 4 chains x 5000 iterations (1000 warmup) = 16000 post-warmup draws.
# Use `<-` for assignment per tidyverse style.
Richness_MI <- rob.cor.mcmc(cbind(Richness, MI), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.1399738, SD = 0.1607093
## Posterior median and MAD:                  Median = 0.1443422, MAD = 0.1639349
## Rho values with 99% posterior probability: 99% HPDI = [-0.266725, 0.5330212]
## Rho values with 95% posterior probability: 95% HPDI = [-0.1691515, 0.4522867]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.1949375
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.8050625
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.3230625
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.167
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.2634375
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.049125
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 5e-04
# Full rstan summary of the fit: posterior quantiles plus n_eff / Rhat convergence diagnostics
print(Richness_MI)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      0.42    0.00  0.02  0.38  0.40  0.42  0.43  0.46 19492    1
## mu[2]      1.82    0.00  0.06  1.72  1.79  1.82  1.86  1.94 20313    1
## sigma[1]   0.13    0.00  0.02  0.10  0.11  0.12  0.14  0.16 18127    1
## sigma[2]   0.34    0.00  0.04  0.27  0.31  0.33  0.36  0.43 18912    1
## nu        27.03    0.10 14.55  7.61 16.40 24.14 34.21 63.17 20773    1
## rho        0.14    0.00  0.16 -0.18  0.03  0.14  0.25  0.44 18067    1
## cov[1,1]   0.02    0.00  0.00  0.01  0.01  0.02  0.02  0.03 17252    1
## cov[1,2]   0.01    0.00  0.01 -0.01  0.00  0.01  0.01  0.02 15278    1
## cov[2,1]   0.01    0.00  0.01 -0.01  0.00  0.01  0.01  0.02 15278    1
## cov[2,2]   0.12    0.00  0.03  0.07  0.10  0.11  0.13  0.19 17866    1
## x_rand[1]  0.41    0.00  0.13  0.15  0.33  0.41  0.50  0.68 15837    1
## x_rand[2]  1.82    0.00  0.36  1.10  1.59  1.82  2.05  2.54 15969    1
## lp__      57.63    0.02  1.75 53.47 56.68 57.96 58.92 60.05  7910    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:49:50 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot; presumably (fit, x-variable, y-variable) — project helper
# defined in rob.cor.mcmc_additional_criteria.R; confirm argument order there.
posterior_scatter(Richness_MI, MI, Richness)

# Diversity ----

# Robust (Student-t) Bayesian correlation between MTLD (lexical diversity) and log raw frequency;
# 4 chains x 5000 iterations (1000 warmup) = 16000 post-warmup draws.
# Use `<-` for assignment per tidyverse style.
MTLD_RawFreq <- rob.cor.mcmc(cbind(MTLD, Rawfreq), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.2199496, SD = 0.155176
## Posterior median and MAD:                  Median = -0.2256312, MAD = 0.1571495
## Rho values with 99% posterior probability: 99% HPDI = [-0.5906061, 0.1832651]
## Rho values with 95% posterior probability: 95% HPDI = [-0.5212153, 0.08307372]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.91425
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.08575
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.1938125
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.09375
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.438
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.121375
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.003
# Full rstan summary of the fit: posterior quantiles plus n_eff / Rhat convergence diagnostics
print(MTLD_RawFreq)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##              mean se_mean    sd    2.5%     25%     50%     75%   97.5% n_eff
## mu[1]       37.02    0.01  1.72   33.64   35.89   37.04   38.14   40.42 20045
## mu[2]        5.93    0.00  0.06    5.81    5.89    5.93    5.96    6.04 19724
## sigma[1]    10.42    0.01  1.30    8.19    9.50   10.30   11.19   13.33 20648
## sigma[2]     0.35    0.00  0.04    0.27    0.32    0.34    0.37    0.44 18850
## nu          28.89    0.10 15.15    8.55   17.86   25.83   36.49   66.25 24952
## rho         -0.22    0.00  0.16   -0.51   -0.33   -0.23   -0.12    0.10 20239
## cov[1,1]   110.24    0.20 28.25   67.10   90.30  106.11  125.32  177.74 19227
## cov[1,2]    -0.81    0.01  0.64   -2.22   -1.18   -0.77   -0.39    0.35 16529
## cov[2,1]    -0.81    0.01  0.64   -2.22   -1.18   -0.77   -0.39    0.35 16529
## cov[2,2]     0.12    0.00  0.03    0.07    0.10    0.12    0.14    0.19 17834
## x_rand[1]   37.02    0.09 11.10   15.03   29.85   36.97   44.10   58.99 15776
## x_rand[2]    5.93    0.00  0.37    5.19    5.69    5.93    6.17    6.67 16297
## lp__      -114.36    0.02  1.81 -118.62 -115.31 -114.02 -113.05 -111.88  6781
##           Rhat
## mu[1]        1
## mu[2]        1
## sigma[1]     1
## sigma[2]     1
## nu           1
## rho          1
## cov[1,1]     1
## cov[1,2]     1
## cov[2,1]     1
## cov[2,2]     1
## x_rand[1]    1
## x_rand[2]    1
## lp__         1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:50:01 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot; presumably (fit, x-variable, y-variable) — project helper
# defined in rob.cor.mcmc_additional_criteria.R; confirm argument order there.
posterior_scatter(MTLD_RawFreq, Rawfreq, MTLD)

# Robust (Student-t) Bayesian correlation between MTLD (lexical diversity) and collocation t-score;
# 4 chains x 5000 iterations (1000 warmup) = 16000 post-warmup draws.
# Use `<-` for assignment per tidyverse style.
MTLD_tscore <- rob.cor.mcmc(cbind(MTLD, tscore), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.1101038, SD = 0.1622575
## Posterior median and MAD:                  Median = -0.1144983, MAD = 0.1646903
## Rho values with 99% posterior probability: 99% HPDI = [-0.5139658, 0.2898092]
## Rho values with 95% posterior probability: 95% HPDI = [-0.4200336, 0.206725]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.7510625
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.2489375
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.362125
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.1853125
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.2128125
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.0344375
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 6.25e-05
# Full rstan summary of the fit: posterior quantiles plus n_eff / Rhat convergence diagnostics
print(MTLD_tscore)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##              mean se_mean    sd    2.5%     25%     50%     75%   97.5% n_eff
## mu[1]       36.81    0.01  1.72   33.38   35.67   36.82   37.94   40.20 20889
## mu[2]        7.51    0.00  0.32    6.87    7.29    7.51    7.72    8.14 20542
## sigma[1]    10.34    0.01  1.31    8.13    9.42   10.22   11.13   13.27 19132
## sigma[2]     1.94    0.00  0.24    1.52    1.77    1.92    2.09    2.48 19573
## nu          25.83    0.10 14.20    7.19   15.43   23.03   33.00   61.29 21385
## rho         -0.11    0.00  0.16   -0.42   -0.22   -0.11    0.00    0.21 20676
## cov[1,1]   108.72    0.21 28.30   66.12   88.81  104.48  123.89  176.13 18122
## cov[1,2]    -2.28    0.03  3.56   -9.80   -4.44   -2.17   -0.01    4.37 16598
## cov[2,1]    -2.28    0.03  3.56   -9.80   -4.44   -2.17   -0.01    4.37 16598
## cov[2,2]     3.82    0.01  0.98    2.30    3.13    3.67    4.36    6.14 18626
## x_rand[1]   37.01    0.09 11.12   14.76   29.94   37.07   44.15   58.76 15963
## x_rand[2]    7.50    0.02  2.07    3.34    6.18    7.52    8.82   11.59 15286
## lp__      -182.86    0.02  1.74 -187.13 -183.81 -182.53 -181.58 -180.44  7481
##           Rhat
## mu[1]        1
## mu[2]        1
## sigma[1]     1
## sigma[2]     1
## nu           1
## rho          1
## cov[1,1]     1
## cov[1,2]     1
## cov[2,1]     1
## cov[2,2]     1
## x_rand[1]    1
## x_rand[2]    1
## lp__         1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:50:12 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot; presumably (fit, x-variable, y-variable) — project helper
# defined in rob.cor.mcmc_additional_criteria.R; confirm argument order there.
posterior_scatter(MTLD_tscore, tscore, MTLD)

# Robust (Student-t) Bayesian correlation between MTLD (lexical diversity) and collocation MI;
# 4 chains x 5000 iterations (1000 warmup) = 16000 post-warmup draws.
# Use `<-` for assignment per tidyverse style.
MTLD_MI <- rob.cor.mcmc(cbind(MTLD, MI), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.06902181, SD = 0.1640945
## Posterior median and MAD:                  Median = 0.07151461, MAD = 0.1664949
## Rho values with 99% posterior probability: 99% HPDI = [-0.3468243, 0.463558]
## Rho values with 95% posterior probability: 95% HPDI = [-0.2549611, 0.3830818]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.33625
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.66375
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.4133125
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.217375
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.1635625
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.020875
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0001875
# Full rstan summary of the fit: posterior quantiles plus n_eff / Rhat convergence diagnostics
print(MTLD_MI)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##              mean se_mean    sd    2.5%     25%     50%     75%   97.5% n_eff
## mu[1]       36.93    0.01  1.72   33.51   35.78   36.94   38.10   40.34 20702
## mu[2]        1.83    0.00  0.06    1.71    1.79    1.83    1.86    1.94 21094
## sigma[1]    10.39    0.01  1.30    8.16    9.48   10.28   11.21   13.19 19971
## sigma[2]     0.34    0.00  0.04    0.27    0.31    0.34    0.37    0.44 19566
## nu          26.90    0.10 14.78    7.60   16.01   23.78   34.32   63.96 21429
## rho          0.07    0.00  0.16   -0.25   -0.04    0.07    0.18    0.39 20962
## cov[1,1]   109.73    0.20 27.99   66.66   89.90  105.58  125.67  174.02 18896
## cov[1,2]     0.25    0.00  0.63   -0.98   -0.14    0.24    0.62    1.54 17367
## cov[2,1]     0.25    0.00  0.63   -0.98   -0.14    0.24    0.62    1.54 17367
## cov[2,2]     0.12    0.00  0.03    0.07    0.10    0.11    0.13    0.19 18191
## x_rand[1]   36.99    0.09 11.30   14.47   29.75   36.90   44.18   59.33 16067
## x_rand[2]    1.83    0.00  0.37    1.11    1.60    1.83    2.06    2.57 16155
## lp__      -115.04    0.02  1.79 -119.40 -116.02 -114.70 -113.72 -112.57  7486
##           Rhat
## mu[1]        1
## mu[2]        1
## sigma[1]     1
## sigma[2]     1
## nu           1
## rho          1
## cov[1,1]     1
## cov[1,2]     1
## cov[2,1]     1
## cov[2,2]     1
## x_rand[1]    1
## x_rand[2]    1
## lp__         1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:50:23 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot; presumably (fit, x-variable, y-variable) — project helper
# defined in rob.cor.mcmc_additional_criteria.R; confirm argument order there.
posterior_scatter(MTLD_MI, MI, MTLD)

# Frequency ----

# Robust (Student-t) Bayesian correlation between COCA spoken log frequency (all words)
# and log raw frequency; 4 chains x 5000 iterations (1000 warmup) = 16000 post-warmup draws.
# Use `<-` for assignment per tidyverse style.
COCA_spoken_Frequency_Log_AW_RawFreq <- rob.cor.mcmc(cbind(COCA_spoken_Frequency_Log_AW, Rawfreq), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.007302924, SD = 0.1641856
## Posterior median and MAD:                  Median = -0.00761488, MAD = 0.1679714
## Rho values with 99% posterior probability: 99% HPDI = [-0.4018594, 0.4156321]
## Rho values with 95% posterior probability: 95% HPDI = [-0.324452, 0.312316]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.5209375
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.4790625
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.4515
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.2334375
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.1301875
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.012375
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 6.25e-05
# Full rstan summary of the fit: posterior quantiles plus n_eff / Rhat convergence diagnostics
print(COCA_spoken_Frequency_Log_AW_RawFreq)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      3.17    0.00  0.01  3.14  3.16  3.17  3.17  3.19 20014    1
## mu[2]      5.92    0.00  0.06  5.80  5.88  5.92  5.96  6.03 19582    1
## sigma[1]   0.08    0.00  0.01  0.06  0.08  0.08  0.09  0.11 18208    1
## sigma[2]   0.34    0.00  0.04  0.27  0.31  0.34  0.37  0.44 18764    1
## nu        23.88    0.10 14.00  6.04 13.69 20.74 30.80 59.09 19261    1
## rho       -0.01    0.00  0.16 -0.32 -0.12 -0.01  0.10  0.31 19457    1
## cov[1,1]   0.01    0.00  0.00  0.00  0.01  0.01  0.01  0.01 17691    1
## cov[1,2]   0.00    0.00  0.01 -0.01  0.00  0.00  0.00  0.01 16427    1
## cov[2,1]   0.00    0.00  0.01 -0.01  0.00  0.00  0.00  0.01 16427    1
## cov[2,2]   0.12    0.00  0.03  0.07  0.10  0.12  0.14  0.19 16976    1
## x_rand[1]  3.17    0.00  0.09  2.98  3.11  3.17  3.22  3.35 15865    1
## x_rand[2]  5.92    0.00  0.37  5.18  5.69  5.92  6.15  6.65 15664    1
## lp__      71.25    0.02  1.77 66.98 70.29 71.58 72.55 73.70  7125    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:50:35 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot; presumably (fit, x-variable, y-variable) — project helper
# defined in rob.cor.mcmc_additional_criteria.R; confirm argument order there.
posterior_scatter(COCA_spoken_Frequency_Log_AW_RawFreq, Rawfreq, COCA_spoken_Frequency_Log_AW)

# Robust (Student-t) Bayesian correlation between COCA spoken log frequency (all words)
# and collocation t-score; 4 chains x 5000 iterations (1000 warmup) = 16000 post-warmup draws.
# Use `<-` for assignment per tidyverse style.
COCA_spoken_Frequency_Log_AW_tscore <- rob.cor.mcmc(cbind(COCA_spoken_Frequency_Log_AW, tscore), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.08681079, SD = 0.1630215
## Posterior median and MAD:                  Median = -0.09007074, MAD = 0.1647332
## Rho values with 99% posterior probability: 99% HPDI = [-0.4790912, 0.3391191]
## Rho values with 95% posterior probability: 95% HPDI = [-0.398883, 0.2351174]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.7029375
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.2970625
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.3946875
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.205375
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.18325
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.025125
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0001875
# Full rstan summary of the fit: posterior quantiles plus n_eff / Rhat convergence diagnostics
print(COCA_spoken_Frequency_Log_AW_tscore)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      3.17    0.00  0.01  3.14  3.16  3.17  3.18  3.19 19214    1
## mu[2]      7.50    0.00  0.32  6.87  7.28  7.50  7.71  8.12 19243    1
## sigma[1]   0.08    0.00  0.01  0.07  0.08  0.08  0.09  0.11 18457    1
## sigma[2]   1.95    0.00  0.25  1.51  1.77  1.93  2.10  2.51 19243    1
## nu        23.07    0.10 13.78  5.92 13.09 19.88 29.56 58.68 20002    1
## rho       -0.09    0.00  0.16 -0.40 -0.20 -0.09  0.02  0.24 18155    1
## cov[1,1]   0.01    0.00  0.00  0.00  0.01  0.01  0.01  0.01 17612    1
## cov[1,2]  -0.01    0.00  0.03 -0.08 -0.03 -0.01  0.00  0.04 14791    1
## cov[2,1]  -0.01    0.00  0.03 -0.08 -0.03 -0.01  0.00  0.04 14791    1
## cov[2,2]   3.85    0.01  1.03  2.29  3.12  3.71  4.42  6.28 18327    1
## x_rand[1]  3.17    0.00  0.09  2.98  3.11  3.17  3.22  3.35 16149    1
## x_rand[2]  7.51    0.02  2.11  3.31  6.18  7.50  8.85 11.70 16094    1
## lp__       3.35    0.02  1.79 -1.02  2.41  3.69  4.67  5.79  7184    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:50:47 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot; presumably (fit, x-variable, y-variable) — project helper
# defined in rob.cor.mcmc_additional_criteria.R; confirm argument order there.
posterior_scatter(COCA_spoken_Frequency_Log_AW_tscore, tscore, COCA_spoken_Frequency_Log_AW)

# Robust (Student-t) Bayesian correlation between COCA spoken log frequency (all words)
# and collocation MI; 4 chains x 5000 iterations (1000 warmup) = 16000 post-warmup draws.
# Use `<-` for assignment per tidyverse style.
COCA_spoken_Frequency_Log_AW_MI <- rob.cor.mcmc(cbind(COCA_spoken_Frequency_Log_AW, MI), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.3387694, SD = 0.1460515
## Posterior median and MAD:                  Median = -0.3456793, MAD = 0.145145
## Rho values with 99% posterior probability: 99% HPDI = [-0.6695534, 0.06706539]
## Rho values with 95% posterior probability: 95% HPDI = [-0.6144263, -0.04786768]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.9838125
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.0161875
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.05825
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.024875
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.740125
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.355375
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.026875
# Full rstan summary of the fit: posterior quantiles plus n_eff / Rhat convergence diagnostics
print(COCA_spoken_Frequency_Log_AW_MI)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      3.17    0.00  0.01  3.14  3.16  3.17  3.17  3.19 17372    1
## mu[2]      1.83    0.00  0.06  1.71  1.79  1.83  1.86  1.94 15757    1
## sigma[1]   0.08    0.00  0.01  0.06  0.08  0.08  0.09  0.11 16967    1
## sigma[2]   0.34    0.00  0.04  0.26  0.31  0.33  0.36  0.43 17503    1
## nu        22.08    0.10 13.50  5.42 12.25 18.95 28.48 56.59 20145    1
## rho       -0.34    0.00  0.15 -0.60 -0.44 -0.35 -0.25 -0.03 17753    1
## cov[1,1]   0.01    0.00  0.00  0.00  0.01  0.01  0.01  0.01 16785    1
## cov[1,2]  -0.01    0.00  0.01 -0.02 -0.01 -0.01 -0.01  0.00 13807    1
## cov[2,1]  -0.01    0.00  0.01 -0.02 -0.01 -0.01 -0.01  0.00 13807    1
## cov[2,2]   0.11    0.00  0.03  0.07  0.09  0.11  0.13  0.19 16797    1
## x_rand[1]  3.16    0.00  0.09  2.98  3.11  3.17  3.22  3.35 15768    1
## x_rand[2]  1.83    0.00  0.37  1.10  1.60  1.83  2.06  2.56 16061    1
## lp__      73.92    0.02  1.79 69.57 72.96 74.28 75.25 76.38  6936    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:50:58 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot; presumably (fit, x-variable, y-variable) — project helper
# defined in rob.cor.mcmc_additional_criteria.R; confirm argument order there.
posterior_scatter(COCA_spoken_Frequency_Log_AW_MI, MI, COCA_spoken_Frequency_Log_AW)

# Range ----

# Robust (Student-t) Bayesian correlation between COCA spoken log range (all words)
# and log raw frequency; 4 chains x 5000 iterations (1000 warmup) = 16000 post-warmup draws.
# Use `<-` for assignment per tidyverse style.
COCA_spoken_Range_Log_AW_RawFreq <- rob.cor.mcmc(cbind(COCA_spoken_Range_Log_AW, Rawfreq), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.0370603, SD = 0.1688143
## Posterior median and MAD:                  Median = -0.03702124, MAD = 0.1722548
## Rho values with 99% posterior probability: 99% HPDI = [-0.4557317, 0.3977934]
## Rho values with 95% posterior probability: 95% HPDI = [-0.3517097, 0.3061047]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.58625
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.41375
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.4313125
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.2234375
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.149625
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.0190625
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.00025
# Full rstan summary of the fit: posterior quantiles plus n_eff / Rhat convergence diagnostics
print(COCA_spoken_Range_Log_AW_RawFreq)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]     -0.38    0.00  0.01 -0.40 -0.38 -0.38 -0.37 -0.36 20692    1
## mu[2]      5.92    0.00  0.06  5.80  5.88  5.92  5.96  6.03 19308    1
## sigma[1]   0.05    0.00  0.01  0.04  0.04  0.05  0.05  0.06 17584    1
## sigma[2]   0.34    0.00  0.04  0.27  0.31  0.34  0.37  0.44 17737    1
## nu        21.94    0.10 13.79  5.06 11.87 18.76 28.43 57.73 20476    1
## rho       -0.04    0.00  0.17 -0.36 -0.15 -0.04  0.08  0.30 18742    1
## cov[1,1]   0.00    0.00  0.00  0.00  0.00  0.00  0.00  0.00 17732    1
## cov[1,2]   0.00    0.00  0.00 -0.01  0.00  0.00  0.00  0.01 15761    1
## cov[2,1]   0.00    0.00  0.00 -0.01  0.00  0.00  0.00  0.01 15761    1
## cov[2,2]   0.12    0.00  0.03  0.07  0.10  0.11  0.13  0.19 16738    1
## x_rand[1] -0.38    0.00  0.05 -0.49 -0.41 -0.38 -0.35 -0.28 16227    1
## x_rand[2]  5.92    0.00  0.37  5.18  5.69  5.92  6.16  6.66 16041    1
## lp__      92.66    0.02  1.80 88.28 91.70 92.99 93.97 95.16  7297    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:51:11 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot; presumably (fit, x-variable, y-variable) — project helper
# defined in rob.cor.mcmc_additional_criteria.R; confirm argument order there.
posterior_scatter(COCA_spoken_Range_Log_AW_RawFreq, Rawfreq, COCA_spoken_Range_Log_AW)

# Robust (Student-t) Bayesian correlation between COCA spoken log range (all words)
# and collocation t-score; 4 chains x 5000 iterations (1000 warmup) = 16000 post-warmup draws.
# Use `<-` for assignment per tidyverse style.
COCA_spoken_Range_Log_AW_tscore <- rob.cor.mcmc(cbind(COCA_spoken_Range_Log_AW, tscore), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.09699968, SD = 0.1690204
## Posterior median and MAD:                  Median = -0.09967865, MAD = 0.1721868
## Rho values with 99% posterior probability: 99% HPDI = [-0.5318362, 0.3245279]
## Rho values with 95% posterior probability: 95% HPDI = [-0.4210897, 0.2335799]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.716125
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.283875
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.377125
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.1915
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.2065
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.0348125
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 5e-04
# Full rstan summary of the fit: posterior quantiles plus n_eff / Rhat convergence diagnostics
print(COCA_spoken_Range_Log_AW_tscore)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]     -0.38    0.00  0.01 -0.40 -0.38 -0.38 -0.37 -0.36 19649    1
## mu[2]      7.49    0.00  0.32  6.85  7.27  7.49  7.70  8.12 19895    1
## sigma[1]   0.05    0.00  0.01  0.04  0.04  0.05  0.05  0.06 16854    1
## sigma[2]   1.92    0.00  0.25  1.49  1.75  1.90  2.08  2.49 18100    1
## nu        20.60    0.10 13.49  4.56 10.80 17.22 27.05 55.10 18167    1
## rho       -0.10    0.00  0.17 -0.42 -0.21 -0.10  0.02  0.24 18367    1
## cov[1,1]   0.00    0.00  0.00  0.00  0.00  0.00  0.00  0.00 16534    1
## cov[1,2]  -0.01    0.00  0.02 -0.04 -0.02 -0.01  0.00  0.02 16089    1
## cov[2,1]  -0.01    0.00  0.02 -0.04 -0.02 -0.01  0.00  0.02 16089    1
## cov[2,2]   3.76    0.01  1.01  2.23  3.06  3.62  4.31  6.18 17138    1
## x_rand[1] -0.38    0.00  0.05 -0.48 -0.41 -0.38 -0.35 -0.27 15756    1
## x_rand[2]  7.50    0.02  2.10  3.27  6.18  7.49  8.82 11.62 16711    1
## lp__      24.76    0.02  1.78 20.46 23.83 25.07 26.07 27.19  7016    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:51:24 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot; presumably (fit, x-variable, y-variable) — project helper
# defined in rob.cor.mcmc_additional_criteria.R; confirm argument order there.
posterior_scatter(COCA_spoken_Range_Log_AW_tscore, tscore, COCA_spoken_Range_Log_AW)

# Robust (Student-t) Bayesian correlation between COCA spoken log range (all words)
# and collocation MI; 4 chains x 5000 iterations (1000 warmup) = 16000 post-warmup draws.
# Use `<-` for assignment per tidyverse style.
COCA_spoken_Range_Log_AW_MI <- rob.cor.mcmc(cbind(COCA_spoken_Range_Log_AW, MI), iter = 5000, warmup = 1000, chains = 4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.3471224, SD = 0.1522736
## Posterior median and MAD:                  Median = -0.3552053, MAD = 0.152433
## Rho values with 99% posterior probability: 99% HPDI = [-0.7058717, 0.06425495]
## Rho values with 95% posterior probability: 95% HPDI = [-0.6390006, -0.05065239]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.9819375
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.0180625
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.05675
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.0248125
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.746
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.3831875
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0364375
print(COCA_spoken_Range_Log_AW_MI)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]     -0.38    0.00  0.01 -0.39 -0.38 -0.38 -0.37 -0.36 17192    1
## mu[2]      1.82    0.00  0.06  1.71  1.79  1.82  1.86  1.93 15498    1
## sigma[1]   0.05    0.00  0.01  0.03  0.04  0.05  0.05  0.06 14130    1
## sigma[2]   0.33    0.00  0.05  0.25  0.30  0.33  0.36  0.43 14666    1
## nu        18.38    0.10 13.06  3.59  8.85 14.93 24.28 52.13 17942    1
## rho       -0.35    0.00  0.15 -0.62 -0.45 -0.36 -0.25 -0.02 17552    1
## cov[1,1]   0.00    0.00  0.00  0.00  0.00  0.00  0.00  0.00 14537    1
## cov[1,2]  -0.01    0.00  0.00 -0.01 -0.01 -0.01  0.00  0.00 13794    1
## cov[2,1]  -0.01    0.00  0.00 -0.01 -0.01 -0.01  0.00  0.00 13794    1
## cov[2,2]   0.11    0.00  0.03  0.06  0.09  0.11  0.13  0.19 14249    1
## x_rand[1] -0.38    0.00  0.05 -0.49 -0.41 -0.38 -0.35 -0.27 15689    1
## x_rand[2]  1.82    0.00  0.38  1.08  1.59  1.82  2.05  2.57 16069    1
## lp__      95.22    0.02  1.80 90.80 94.29 95.58 96.54 97.66  7114    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:51:37 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
posterior_scatter(COCA_spoken_Range_Log_AW_MI, MI, COCA_spoken_Range_Log_AW)

## Correlational analysis on first response measures ----

## Fluency rating (speech rate) ----

# Robust Bayesian correlation between Fluency and Raw_first (log raw
# frequency of first responses; log-transformed at the top of the script).
# The ## lines below are console output captured from a previous run.
Fluency_Raw_first = rob.cor.mcmc(cbind(Fluency, Raw_first), iter=5000, warmup=1000, chains=4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.3714333, SD = 0.1429251
## Posterior median and MAD:                  Median = -0.3805206, MAD = 0.1432005
## Rho values with 99% posterior probability: 99% HPDI = [-0.6897861, 0.02723236]
## Rho values with 95% posterior probability: 95% HPDI = [-0.6421761, -0.09231737]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.990375
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.009625
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.0364375
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.0144375
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.806375
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.4456875
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0390625
# Full Stan summary of the fitted model (captured below).
print(Fluency_Raw_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      0.52    0.00  0.03  0.47  0.50  0.52  0.54  0.58 17468    1
## mu[2]      5.97    0.00  0.10  5.79  5.91  5.97  6.04  6.16 17943    1
## sigma[1]   0.17    0.00  0.02  0.14  0.16  0.17  0.18  0.22 17238    1
## sigma[2]   0.58    0.00  0.07  0.46  0.53  0.57  0.63  0.74 16708    1
## nu        28.18    0.10 14.96  8.19 17.26 25.16 35.89 64.41 22011    1
## rho       -0.37    0.00  0.14 -0.62 -0.47 -0.38 -0.28 -0.07 15548    1
## cov[1,1]   0.03    0.00  0.01  0.02  0.02  0.03  0.03  0.05 16404    1
## cov[1,2]  -0.04    0.00  0.02 -0.08 -0.05 -0.04 -0.03 -0.01 12747    1
## cov[2,1]  -0.04    0.00  0.02 -0.08 -0.05 -0.04 -0.03 -0.01 12747    1
## cov[2,2]   0.34    0.00  0.09  0.21  0.28  0.33  0.39  0.55 15886    1
## x_rand[1]  0.52    0.00  0.18  0.15  0.41  0.52  0.64  0.89 15769    1
## x_rand[2]  5.97    0.00  0.62  4.73  5.58  5.98  6.38  7.18 15757    1
## lp__      27.36    0.02  1.78 23.08 26.41 27.69 28.65 29.82  7173    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:51:49 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot (fit, y, x) -- argument order assumed; TODO confirm.
posterior_scatter(Fluency_Raw_first, Raw_first, Fluency)

# Robust Bayesian correlation between Fluency and tscore_first
# (collocational t-score of first responses -- variable-name reading;
# TODO confirm against the codebook). Captured output follows.
Fluency_tscore_first = rob.cor.mcmc(cbind(Fluency, tscore_first), iter=5000, warmup=1000, chains=4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.2568913, SD = 0.153614
## Posterior median and MAD:                  Median = -0.2635442, MAD = 0.1554936
## Rho values with 99% posterior probability: 99% HPDI = [-0.6131416, 0.1669564]
## Rho values with 95% posterior probability: 95% HPDI = [-0.5494539, 0.04586674]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.947375
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.052625
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.140125
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.0658125
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.537
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.1784375
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.007125
# Full Stan summary of the fitted model (captured below).
print(Fluency_tscore_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##             mean se_mean    sd   2.5%    25%    50%    75%  97.5% n_eff Rhat
## mu[1]       0.52    0.00  0.03   0.46   0.50   0.52   0.54   0.58 19678    1
## mu[2]       8.59    0.00  0.56   7.47   8.21   8.58   8.96   9.69 19965    1
## sigma[1]    0.17    0.00  0.02   0.14   0.16   0.17   0.18   0.22 17729    1
## sigma[2]    3.40    0.00  0.43   2.67   3.10   3.36   3.65   4.35 17614    1
## nu         26.90    0.10 14.92   7.43  16.07  23.71  34.34  64.35 21001    1
## rho        -0.26    0.00  0.15  -0.54  -0.37  -0.26  -0.15   0.06 19883    1
## cov[1,1]    0.03    0.00  0.01   0.02   0.02   0.03   0.03   0.05 16421    1
## cov[1,2]   -0.15    0.00  0.11  -0.38  -0.22  -0.15  -0.08   0.03 15795    1
## cov[2,1]   -0.15    0.00  0.11  -0.38  -0.22  -0.15  -0.08   0.03 15795    1
## cov[2,2]   11.72    0.02  3.04   7.11   9.59  11.28  13.33  18.93 16422    1
## x_rand[1]   0.52    0.00  0.18   0.15   0.40   0.52   0.64   0.88 16241    1
## x_rand[2]   8.64    0.03  3.66   1.45   6.28   8.65  10.98  15.83 16689    1
## lp__      -43.44    0.02  1.79 -47.81 -44.41 -43.10 -42.14 -40.97  6747    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:52:00 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot (fit, y, x) -- argument order assumed; TODO confirm.
posterior_scatter(Fluency_tscore_first, tscore_first, Fluency)

# Robust Bayesian correlation between Fluency and MI_first
# (mutual-information score of first responses -- variable-name reading;
# TODO confirm against the codebook). Captured output follows.
Fluency_MI_first = rob.cor.mcmc(cbind(Fluency, MI_first), iter=5000, warmup=1000, chains=4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.06085502, SD = 0.1607068
## Posterior median and MAD:                  Median = 0.06310896, MAD = 0.1611896
## Rho values with 99% posterior probability: 99% HPDI = [-0.3484289, 0.4729963]
## Rho values with 95% posterior probability: 95% HPDI = [-0.247167, 0.3805372]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.350875
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.649125
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.4353125
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.2251875
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.1471875
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.017625
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 6.25e-05
# Full Stan summary of the fitted model (captured below).
print(Fluency_MI_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      0.52    0.00  0.03  0.47  0.50  0.52  0.54  0.58 19714    1
## mu[2]      2.02    0.00  0.08  1.86  1.96  2.02  2.07  2.18 21345    1
## sigma[1]   0.17    0.00  0.02  0.14  0.16  0.17  0.18  0.22 18480    1
## sigma[2]   0.48    0.00  0.06  0.38  0.44  0.48  0.52  0.61 18443    1
## nu        27.78    0.10 15.01  7.86 16.91 24.76 35.10 65.27 21112    1
## rho        0.06    0.00  0.16 -0.26 -0.05  0.06  0.17  0.37 19213    1
## cov[1,1]   0.03    0.00  0.01  0.02  0.02  0.03  0.03  0.05 17474    1
## cov[1,2]   0.01    0.00  0.01 -0.02  0.00  0.00  0.01  0.04 15476    1
## cov[2,1]   0.01    0.00  0.01 -0.02  0.00  0.00  0.01  0.04 15476    1
## cov[2,2]   0.24    0.00  0.06  0.14  0.19  0.23  0.27  0.38 17335    1
## x_rand[1]  0.52    0.00  0.18  0.16  0.41  0.52  0.64  0.89 15173    1
## x_rand[2]  2.02    0.00  0.52  1.00  1.68  2.02  2.35  3.05 15597    1
## lp__      31.56    0.02  1.79 27.15 30.61 31.91 32.88 34.00  7501    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:52:11 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot (fit, y, x) -- argument order assumed; TODO confirm.
posterior_scatter(Fluency_MI_first, MI_first, Fluency)

## Articulation Rate ----

# Robust Bayesian correlation between ArticulationRate and Raw_first
# (log raw frequency of first responses). Captured output follows.
ArticulationRate_Raw_first = rob.cor.mcmc(cbind(ArticulationRate, Raw_first), iter=5000, warmup=1000, chains=4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.3121715, SD = 0.1502635
## Posterior median and MAD:                  Median = -0.3221759, MAD = 0.152174
## Rho values with 99% posterior probability: 99% HPDI = [-0.6560713, 0.09047442]
## Rho values with 95% posterior probability: 95% HPDI = [-0.5912842, -0.01272845]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.9735625
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.0264375
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.083125
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.037375
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.673625
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.2954375
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.015375
# Full Stan summary (note: wide table wraps; Rhat column printed separately).
print(ArticulationRate_Raw_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##              mean se_mean     sd    2.5%     25%     50%     75%   97.5% n_eff
## mu[1]      108.95    0.03   4.48  100.12  106.00  108.93  111.90  117.91 17263
## mu[2]        5.96    0.00   0.10    5.78    5.90    5.96    6.03    6.16 17114
## sigma[1]    26.54    0.03   3.42   20.66   24.13   26.28   28.66   34.10 17673
## sigma[2]     0.57    0.00   0.07    0.45    0.52    0.56    0.61    0.73 17596
## nu          24.29    0.10  14.02    6.32   13.97   21.21   31.29   60.01 18923
## rho         -0.31    0.00   0.15   -0.58   -0.42   -0.32   -0.21    0.01 18423
## cov[1,1]   716.17    1.45 188.22  426.67  582.33  690.39  821.15 1163.00 16838
## cov[1,2]    -4.85    0.02   2.81  -11.05   -6.48   -4.62   -2.95    0.08 15235
## cov[2,1]    -4.85    0.02   2.81  -11.05   -6.48   -4.62   -2.95    0.08 15235
## cov[2,2]     0.33    0.00   0.09    0.20    0.27    0.32    0.38    0.54 16969
## x_rand[1]  108.79    0.23  28.82   52.65   90.37  108.85  127.31  165.73 16041
## x_rand[2]    5.96    0.00   0.62    4.74    5.56    5.97    6.36    7.20 15825
## lp__      -170.75    0.02   1.80 -175.14 -171.68 -170.41 -169.44 -168.29  7587
##           Rhat
## mu[1]        1
## mu[2]        1
## sigma[1]     1
## sigma[2]     1
## nu           1
## rho          1
## cov[1,1]     1
## cov[1,2]     1
## cov[2,1]     1
## cov[2,2]     1
## x_rand[1]    1
## x_rand[2]    1
## lp__         1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:52:22 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot (fit, y, x) -- argument order assumed; TODO confirm.
posterior_scatter(ArticulationRate_Raw_first, Raw_first, ArticulationRate)

# Robust Bayesian correlation between ArticulationRate and tscore_first.
# Captured console output from a previous run follows.
ArticulationRate_tscore_first = rob.cor.mcmc(cbind(ArticulationRate, tscore_first), iter=5000, warmup=1000, chains=4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.1940523, SD = 0.1578336
## Posterior median and MAD:                  Median = -0.1998097, MAD = 0.157627
## Rho values with 99% posterior probability: 99% HPDI = [-0.5520304, 0.2315173]
## Rho values with 95% posterior probability: 95% HPDI = [-0.4897879, 0.1220478]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.88375
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.11625
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.229875
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.1131875
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.3765625
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.0924375
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.001125
# Full Stan summary (wide table wraps; Rhat column printed separately).
print(ArticulationRate_tscore_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##              mean se_mean     sd    2.5%     25%     50%     75%   97.5% n_eff
## mu[1]      109.01    0.03   4.40  100.30  106.04  109.00  111.96  117.71 20299
## mu[2]        8.58    0.00   0.57    7.46    8.20    8.58    8.96    9.70 19676
## sigma[1]    26.74    0.02   3.44   20.77   24.32   26.44   28.83   34.24 19180
## sigma[2]     3.39    0.00   0.43    2.66    3.09    3.35    3.65    4.34 17477
## nu          25.32    0.10  14.20    6.86   14.92   22.30   32.32   60.92 20522
## rho         -0.19    0.00   0.16   -0.48   -0.30   -0.20   -0.09    0.13 18409
## cov[1,1]   726.72    1.42 191.75  431.40  591.45  699.06  831.24 1172.58 18181
## cov[1,2]   -18.10    0.14  16.28  -52.68  -27.67  -17.20   -7.60   11.84 14373
## cov[2,1]   -18.10    0.14  16.28  -52.68  -27.67  -17.20   -7.60   11.84 14373
## cov[2,2]    11.70    0.02   3.04    7.08    9.56   11.25   13.35   18.87 16437
## x_rand[1]  108.91    0.23  29.01   51.21   90.70  109.02  127.23  166.46 15830
## x_rand[2]    8.63    0.03   3.61    1.52    6.27    8.65   10.99   15.82 15910
## lp__      -241.33    0.02   1.78 -245.72 -242.27 -240.99 -240.02 -238.89  6875
##           Rhat
## mu[1]        1
## mu[2]        1
## sigma[1]     1
## sigma[2]     1
## nu           1
## rho          1
## cov[1,1]     1
## cov[1,2]     1
## cov[2,1]     1
## cov[2,2]     1
## x_rand[1]    1
## x_rand[2]    1
## lp__         1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:52:33 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot (fit, y, x) -- argument order assumed; TODO confirm.
posterior_scatter(ArticulationRate_tscore_first, tscore_first, ArticulationRate)

# Robust Bayesian correlation between ArticulationRate and MI_first.
# Captured console output from a previous run follows.
ArticulationRate_MI_first = rob.cor.mcmc(cbind(ArticulationRate, MI_first), iter=5000, warmup=1000, chains=4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.01924975, SD = 0.1650138
## Posterior median and MAD:                  Median = 0.02122846, MAD = 0.1698644
## Rho values with 99% posterior probability: 99% HPDI = [-0.387771, 0.431698]
## Rho values with 95% posterior probability: 95% HPDI = [-0.3103074, 0.3279178]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.4499375
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.5500625
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.4401875
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.228125
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.134125
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.0134375
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 6.25e-05
# Full Stan summary (wide table wraps; Rhat column printed separately).
print(ArticulationRate_MI_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##              mean se_mean     sd    2.5%     25%     50%     75%   97.5% n_eff
## mu[1]      109.24    0.03   4.42  100.54  106.31  109.24  112.17  117.93 19918
## mu[2]        2.02    0.00   0.08    1.86    1.96    2.02    2.07    2.18 17830
## sigma[1]    26.70    0.03   3.47   20.72   24.27   26.39   28.80   34.36 18370
## sigma[2]     0.48    0.00   0.06    0.37    0.44    0.47    0.52    0.62 18332
## nu          24.90    0.10  14.27    6.64   14.66   21.75   31.90   60.26 19230
## rho          0.02    0.00   0.17   -0.31   -0.09    0.02    0.14    0.33 19028
## cov[1,1]   724.82    1.46 192.71  429.47  588.88  696.42  829.43 1180.90 17357
## cov[1,2]     0.26    0.02   2.27   -4.30   -1.16    0.25    1.67    4.78 15689
## cov[2,1]     0.26    0.02   2.27   -4.30   -1.16    0.25    1.67    4.78 15689
## cov[2,2]     0.23    0.00   0.06    0.14    0.19    0.23    0.27    0.38 17409
## x_rand[1]  109.41    0.23  28.71   52.35   91.05  109.68  127.77  165.75 15400
## x_rand[2]    2.01    0.00   0.53    0.96    1.68    2.01    2.35    3.07 14709
## lp__      -165.95    0.02   1.77 -170.23 -166.94 -165.63 -164.63 -163.49  7236
##           Rhat
## mu[1]        1
## mu[2]        1
## sigma[1]     1
## sigma[2]     1
## nu           1
## rho          1
## cov[1,1]     1
## cov[1,2]     1
## cov[2,1]     1
## cov[2,2]     1
## x_rand[1]    1
## x_rand[2]    1
## lp__         1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:52:45 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot (fit, y, x) -- argument order assumed; TODO confirm.
posterior_scatter(ArticulationRate_MI_first, MI_first, ArticulationRate)

## Silent Pause ratio ----

# Robust Bayesian correlation between SilentPause and Raw_first
# (log raw frequency of first responses). Captured output follows.
SilentPause_Raw_first = rob.cor.mcmc(cbind(SilentPause, Raw_first), iter=5000, warmup=1000, chains=4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.3293046, SD = 0.1479797
## Posterior median and MAD:                  Median = 0.336956, MAD = 0.1464312
## Rho values with 99% posterior probability: 99% HPDI = [-0.07243442, 0.666712]
## Rho values with 95% posterior probability: 95% HPDI = [0.04178649, 0.6124378]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.019625
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.980375
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.0685
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.0285
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.721125
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.3321875
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.02225
# Full Stan summary of the fitted model (captured below).
print(SilentPause_Raw_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      0.59    0.00  0.04  0.52  0.57  0.59  0.62  0.66 18724    1
## mu[2]      5.96    0.00  0.10  5.78  5.90  5.96  6.03  6.16 17321    1
## sigma[1]   0.21    0.00  0.03  0.17  0.19  0.21  0.23  0.27 16383    1
## sigma[2]   0.58    0.00  0.07  0.45  0.53  0.57  0.62  0.74 17921    1
## nu        26.60    0.10 14.79  7.54 15.92 23.41 33.92 62.99 21473    1
## rho        0.33    0.00  0.15  0.02  0.23  0.34  0.43  0.59 18333    1
## cov[1,1]   0.05    0.00  0.01  0.03  0.04  0.04  0.05  0.07 15434    1
## cov[1,2]   0.04    0.00  0.02  0.00  0.03  0.04  0.06  0.09 14318    1
## cov[2,1]   0.04    0.00  0.02  0.00  0.03  0.04  0.06  0.09 14318    1
## cov[2,2]   0.34    0.00  0.09  0.21  0.28  0.33  0.39  0.55 16898    1
## x_rand[1]  0.59    0.00  0.23  0.13  0.44  0.59  0.74  1.05 15813    1
## x_rand[2]  5.97    0.01  0.63  4.73  5.57  5.97  6.37  7.22 15025    1
## lp__      17.93    0.02  1.79 13.55 16.98 18.26 19.24 20.39  7311    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:52:58 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot (fit, y, x) -- argument order assumed; TODO confirm.
posterior_scatter(SilentPause_Raw_first, Raw_first, SilentPause)

# Robust Bayesian correlation between SilentPause and tscore_first.
# Captured console output from a previous run follows.
SilentPause_tscore_first = rob.cor.mcmc(cbind(SilentPause, tscore_first), iter=5000, warmup=1000, chains=4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.2025263, SD = 0.1573282
## Posterior median and MAD:                  Median = 0.2071733, MAD = 0.1586361
## Rho values with 99% posterior probability: 99% HPDI = [-0.219817, 0.5629524]
## Rho values with 95% posterior probability: 95% HPDI = [-0.1078573, 0.5019379]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.101375
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.898625
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.222
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.1126875
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.398875
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.104375
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0021875
# Full Stan summary of the fitted model (captured below).
print(SilentPause_tscore_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##             mean se_mean    sd   2.5%    25%    50%    75%  97.5% n_eff Rhat
## mu[1]       0.59    0.00  0.04   0.52   0.57   0.59   0.62   0.66 18275    1
## mu[2]       8.58    0.00  0.57   7.46   8.20   8.58   8.95   9.70 20160    1
## sigma[1]    0.21    0.00  0.03   0.17   0.19   0.21   0.23   0.27 16621    1
## sigma[2]    3.40    0.00  0.43   2.65   3.10   3.36   3.66   4.35 20223    1
## nu         26.33    0.10 14.57   7.42  15.70  23.19  33.49  63.20 22077    1
## rho         0.20    0.00  0.16  -0.12   0.10   0.21   0.31   0.49 17903    1
## cov[1,1]    0.05    0.00  0.01   0.03   0.04   0.04   0.05   0.08 15668    1
## cov[1,2]    0.15    0.00  0.13  -0.08   0.06   0.14   0.23   0.44 14465    1
## cov[2,1]    0.15    0.00  0.13  -0.08   0.06   0.14   0.23   0.44 14465    1
## cov[2,2]   11.74    0.02  3.04   7.04   9.58  11.30  13.40  18.89 19291    1
## x_rand[1]   0.59    0.00  0.23   0.13   0.44   0.59   0.74   1.05 16109    1
## x_rand[2]   8.58    0.03  3.67   1.26   6.24   8.59  10.92  15.89 15578    1
## lp__      -52.80    0.02  1.77 -57.09 -53.75 -52.50 -51.49 -50.35  7361    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:53:09 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot (fit, y, x) -- argument order assumed; TODO confirm.
posterior_scatter(SilentPause_tscore_first, tscore_first, SilentPause)

# Robust Bayesian correlation between SilentPause and MI_first.
# Captured console output from a previous run follows.
SilentPause_MI_first = rob.cor.mcmc(cbind(SilentPause, MI_first), iter=5000, warmup=1000, chains=4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.04499309, SD = 0.1632741
## Posterior median and MAD:                  Median = -0.04562353, MAD = 0.1686032
## Rho values with 99% posterior probability: 99% HPDI = [-0.4405363, 0.3704403]
## Rho values with 95% posterior probability: 95% HPDI = [-0.3545281, 0.2754869]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.6068125
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.3931875
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.433
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.226125
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.14075
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.014875
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0
# Full Stan summary of the fitted model (captured below).
print(SilentPause_MI_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      0.59    0.00  0.04  0.52  0.57  0.59  0.62  0.66 21554    1
## mu[2]      2.02    0.00  0.08  1.86  1.96  2.02  2.07  2.17 20777    1
## sigma[1]   0.21    0.00  0.03  0.17  0.19  0.21  0.23  0.27 18215    1
## sigma[2]   0.48    0.00  0.06  0.38  0.44  0.48  0.52  0.61 19877    1
## nu        25.51    0.10 14.09  6.90 15.25 22.63 32.60 61.19 21428    1
## rho       -0.04    0.00  0.16 -0.36 -0.16 -0.05  0.07  0.27 19376    1
## cov[1,1]   0.05    0.00  0.01  0.03  0.04  0.04  0.05  0.07 17155    1
## cov[1,2]   0.00    0.00  0.02 -0.04 -0.02  0.00  0.01  0.03 15920    1
## cov[2,1]   0.00    0.00  0.02 -0.04 -0.02  0.00  0.01  0.03 15920    1
## cov[2,2]   0.24    0.00  0.06  0.14  0.19  0.23  0.27  0.38 18796    1
## x_rand[1]  0.59    0.00  0.23  0.14  0.45  0.59  0.74  1.04 16174    1
## x_rand[2]  2.02    0.00  0.52  0.96  1.69  2.03  2.36  3.04 16186    1
## lp__      22.64    0.02  1.75 18.40 21.70 22.98 23.93 25.04  7367    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:53:20 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot (fit, y, x) -- argument order assumed; TODO confirm.
posterior_scatter(SilentPause_MI_first, MI_first, SilentPause)

## Filled pause ratio ----

# Robust Bayesian correlation between FilledPause and Raw_first
# (log raw frequency of first responses). Captured output follows.
FilledPause_Raw_first = rob.cor.mcmc(cbind(FilledPause, Raw_first), iter=5000, warmup=1000, chains=4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.1729608, SD = 0.1603981
## Posterior median and MAD:                  Median = -0.1778422, MAD = 0.161257
## Rho values with 99% posterior probability: 99% HPDI = [-0.559818, 0.2466906]
## Rho values with 95% posterior probability: 95% HPDI = [-0.4772728, 0.1437702]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.8565625
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.1434375
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.2669375
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.132
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.333125
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.076125
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.001375
# Full Stan summary of the fitted model (captured below).
print(FilledPause_Raw_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      0.07    0.00  0.01  0.05  0.06  0.07  0.08  0.09 21876    1
## mu[2]      5.97    0.00  0.10  5.78  5.90  5.97  6.04  6.16 21540    1
## sigma[1]   0.06    0.00  0.01  0.05  0.06  0.06  0.07  0.08 19741    1
## sigma[2]   0.58    0.00  0.07  0.45  0.53  0.57  0.62  0.74 18581    1
## nu        24.85    0.10 14.28  6.71 14.33 21.77 31.85 61.20 20811    1
## rho       -0.17    0.00  0.16 -0.47 -0.28 -0.18 -0.07  0.15 19407    1
## cov[1,1]   0.00    0.00  0.00  0.00  0.00  0.00  0.00  0.01 18801    1
## cov[1,2]  -0.01    0.00  0.01 -0.02 -0.01 -0.01  0.00  0.01 16225    1
## cov[2,1]  -0.01    0.00  0.01 -0.02 -0.01 -0.01  0.00  0.01 16225    1
## cov[2,2]   0.34    0.00  0.09  0.20  0.28  0.33  0.39  0.55 17542    1
## x_rand[1]  0.07    0.00  0.07 -0.06  0.03  0.07  0.11  0.20 15372    1
## x_rand[2]  5.98    0.00  0.62  4.73  5.59  5.98  6.37  7.20 15627    1
## lp__      64.67    0.02  1.79 60.32 63.72 65.00 66.00 67.14  6573    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:53:32 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Posterior scatter plot (fit, y, x) -- argument order assumed; TODO confirm.
posterior_scatter(FilledPause_Raw_first, Raw_first, FilledPause)

FilledPause_tscore_first = rob.cor.mcmc(cbind(FilledPause, tscore_first), iter=5000, warmup=1000, chains=4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.2925439, SD = 0.1512972
## Posterior median and MAD:                  Median = -0.2999707, MAD = 0.1499616
## Rho values with 99% posterior probability: 99% HPDI = [-0.6508023, 0.1171802]
## Rho values with 95% posterior probability: 95% HPDI = [-0.5703691, 0.01939991]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.9641875
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.0358125
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.0975
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.0456875
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.6310625
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.24525
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0125
print(FilledPause_Raw_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      0.07    0.00  0.01  0.05  0.06  0.07  0.08  0.09 21876    1
## mu[2]      5.97    0.00  0.10  5.78  5.90  5.97  6.04  6.16 21540    1
## sigma[1]   0.06    0.00  0.01  0.05  0.06  0.06  0.07  0.08 19741    1
## sigma[2]   0.58    0.00  0.07  0.45  0.53  0.57  0.62  0.74 18581    1
## nu        24.85    0.10 14.28  6.71 14.33 21.77 31.85 61.20 20811    1
## rho       -0.17    0.00  0.16 -0.47 -0.28 -0.18 -0.07  0.15 19407    1
## cov[1,1]   0.00    0.00  0.00  0.00  0.00  0.00  0.00  0.01 18801    1
## cov[1,2]  -0.01    0.00  0.01 -0.02 -0.01 -0.01  0.00  0.01 16225    1
## cov[2,1]  -0.01    0.00  0.01 -0.02 -0.01 -0.01  0.00  0.01 16225    1
## cov[2,2]   0.34    0.00  0.09  0.20  0.28  0.33  0.39  0.55 17542    1
## x_rand[1]  0.07    0.00  0.07 -0.06  0.03  0.07  0.11  0.20 15372    1
## x_rand[2]  5.98    0.00  0.62  4.73  5.59  5.98  6.37  7.20 15627    1
## lp__      64.67    0.02  1.79 60.32 63.72 65.00 66.00 67.14  6573    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:53:32 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatterplot of FilledPause vs. tscore_first with the inferred distribution
# overlaid (helper from the sourced rob.cor.mcmc_additional_criteria.R).
posterior_scatter(FilledPause_tscore_first, tscore_first, FilledPause)

# Robust Bayesian correlation: FilledPause vs. MI_first;
# 4 chains of 5000 iterations (1000 warmup).
FilledPause_MI_first <- rob.cor.mcmc(
  cbind(FilledPause, MI_first),
  iter = 5000, warmup = 1000, chains = 4
)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.2554604, SD = 0.158861
## Posterior median and MAD:                  Median = -0.2627012, MAD = 0.1608616
## Rho values with 99% posterior probability: 99% HPDI = [-0.6221882, 0.1723432]
## Rho values with 95% posterior probability: 95% HPDI = [-0.5669873, 0.0478328]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.94025
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.05975
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.147375
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.0689375
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.530875
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.18525
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0070625
# Full Stan posterior summary for the FilledPause ~ MI_first model
# (printed rho -0.26 matches the summary above, mean = -0.2554604).
print(FilledPause_MI_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      0.07    0.00  0.01  0.05  0.06  0.07  0.07  0.09 17430    1
## mu[2]      2.02    0.00  0.08  1.87  1.97  2.02  2.08  2.18 18514    1
## sigma[1]   0.06    0.00  0.01  0.05  0.05  0.06  0.07  0.08 15993    1
## sigma[2]   0.47    0.00  0.06  0.36  0.43  0.47  0.51  0.61 15824    1
## nu        22.41    0.10 13.82  5.19 12.40 19.29 29.03 57.52 18089    1
## rho       -0.26    0.00  0.16 -0.54 -0.37 -0.26 -0.15  0.07 17414    1
## cov[1,1]   0.00    0.00  0.00  0.00  0.00  0.00  0.00  0.01 15823    1
## cov[1,2]  -0.01    0.00  0.01 -0.02 -0.01 -0.01  0.00  0.00 14120    1
## cov[2,1]  -0.01    0.00  0.01 -0.02 -0.01 -0.01  0.00  0.00 14120    1
## cov[2,2]   0.23    0.00  0.06  0.13  0.18  0.22  0.26  0.37 15270    1
## x_rand[1]  0.07    0.00  0.07 -0.06  0.03  0.07  0.11  0.20 15733    1
## x_rand[2]  2.02    0.00  0.52  0.98  1.70  2.02  2.35  3.05 15743    1
## lp__      72.82    0.02  1.80 68.44 71.86 73.16 74.12 75.31  7232    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:53:58 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatterplot of FilledPause vs. MI_first with the inferred distribution.
posterior_scatter(FilledPause_MI_first, MI_first, FilledPause)

# Richness rating ----

# Robust Bayesian correlation: Richness vs. Raw_first (log-transformed
# raw frequency, first-word measure); 4 chains of 5000 iterations (1000 warmup).
Richness_Raw_first <- rob.cor.mcmc(
  cbind(Richness, Raw_first),
  iter = 5000, warmup = 1000, chains = 4
)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.2615031, SD = 0.1532825
## Posterior median and MAD:                  Median = -0.2680534, MAD = 0.1543734
## Rho values with 99% posterior probability: 99% HPDI = [-0.6390891, 0.1408571]
## Rho values with 95% posterior probability: 95% HPDI = [-0.5515097, 0.04168982]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.9489375
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.0510625
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.1360625
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.0626875
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.5460625
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.1865
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0070625
# Full Stan posterior summary for the Richness ~ Raw_first model
# (printed rho -0.26 matches the summary above, mean = -0.2615031).
print(Richness_Raw_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      0.42    0.00  0.02  0.38  0.41  0.42  0.43  0.46 16493    1
## mu[2]      5.97    0.00  0.10  5.79  5.91  5.97  6.04  6.16 17806    1
## sigma[1]   0.13    0.00  0.02  0.10  0.11  0.12  0.14  0.16 16472    1
## sigma[2]   0.58    0.00  0.07  0.46  0.53  0.57  0.63  0.75 17729    1
## nu        28.35    0.11 15.14  8.20 17.30 25.25 36.13 65.40 19616    1
## rho       -0.26    0.00  0.15 -0.54 -0.37 -0.27 -0.16  0.05 18285    1
## cov[1,1]   0.02    0.00  0.00  0.01  0.01  0.02  0.02  0.03 15735    1
## cov[1,2]  -0.02    0.00  0.01 -0.05 -0.03 -0.02 -0.01  0.00 14582    1
## cov[2,1]  -0.02    0.00  0.01 -0.05 -0.03 -0.02 -0.01  0.00 14582    1
## cov[2,2]   0.34    0.00  0.09  0.21  0.28  0.33  0.39  0.56 16593    1
## x_rand[1]  0.42    0.00  0.14  0.15  0.33  0.42  0.51  0.69 15748    1
## x_rand[2]  5.98    0.00  0.62  4.74  5.59  5.97  6.38  7.21 16235    1
## lp__      37.84    0.02  1.81 33.35 36.90 38.20 39.16 40.29  6943    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:54:09 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatterplot of Richness vs. Raw_first with the inferred distribution.
posterior_scatter(Richness_Raw_first, Raw_first, Richness)

# Robust Bayesian correlation: Richness vs. tscore_first;
# 4 chains of 5000 iterations (1000 warmup).
Richness_tscore_first <- rob.cor.mcmc(
  cbind(Richness, tscore_first),
  iter = 5000, warmup = 1000, chains = 4
)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.09007894, SD = 0.1626942
## Posterior median and MAD:                  Median = 0.09240826, MAD = 0.1668717
## Rho values with 99% posterior probability: 99% HPDI = [-0.3180514, 0.4987509]
## Rho values with 95% posterior probability: 95% HPDI = [-0.2254739, 0.4031996]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.291
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.709
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.39075
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.199375
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.1874375
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.026375
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0003125
# Full Stan posterior summary for the Richness ~ tscore_first model
# (printed rho 0.09 matches the summary above, mean = 0.09007894).
print(Richness_tscore_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##             mean se_mean    sd   2.5%    25%    50%    75%  97.5% n_eff Rhat
## mu[1]       0.42    0.00  0.02   0.38   0.41   0.42   0.43   0.46 18224    1
## mu[2]       8.62    0.00  0.57   7.50   8.24   8.62   8.99   9.76 20104    1
## sigma[1]    0.13    0.00  0.02   0.10   0.11   0.12   0.14   0.16 18479    1
## sigma[2]    3.42    0.00  0.44   2.68   3.11   3.37   3.68   4.36 19793    1
## nu         27.20    0.10 14.72   7.69  16.54  24.20  34.72  64.15 20400    1
## rho         0.09    0.00  0.16  -0.23  -0.02   0.09   0.20   0.40 19161    1
## cov[1,1]    0.02    0.00  0.00   0.01   0.01   0.02   0.02   0.03 17740    1
## cov[1,2]    0.04    0.00  0.08  -0.11  -0.01   0.04   0.09   0.19 15954    1
## cov[2,1]    0.04    0.00  0.08  -0.11  -0.01   0.04   0.09   0.19 15954    1
## cov[2,2]   11.86    0.02  3.12   7.16   9.65  11.38  13.54  19.05 18516    1
## x_rand[1]   0.42    0.00  0.14   0.15   0.33   0.42   0.51   0.69 15943    1
## x_rand[2]   8.60    0.03  3.67   1.39   6.25   8.55  10.95  15.84 15939    1
## lp__      -32.69    0.02  1.78 -36.96 -33.66 -32.36 -31.38 -30.23  7608    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:54:20 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatterplot of Richness vs. tscore_first with the inferred distribution.
posterior_scatter(Richness_tscore_first, tscore_first, Richness)

# Robust Bayesian correlation: Richness vs. MI_first;
# 4 chains of 5000 iterations (1000 warmup).
Richness_MI_first <- rob.cor.mcmc(
  cbind(Richness, MI_first),
  iter = 5000, warmup = 1000, chains = 4
)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.3442574, SD = 0.1452182
## Posterior median and MAD:                  Median = 0.3516802, MAD = 0.1434889
## Rho values with 99% posterior probability: 99% HPDI = [-0.0528074, 0.6800495]
## Rho values with 95% posterior probability: 95% HPDI = [0.05962729, 0.6212149]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.0149375
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.9850625
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.0525
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.0230625
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.7533125
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.3684375
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.026625
# Full Stan posterior summary for the Richness ~ MI_first model
# (printed rho 0.34 matches the summary above, mean = 0.3442574).
print(Richness_MI_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      0.42    0.00  0.02  0.38  0.40  0.42  0.43  0.46 17183    1
## mu[2]      2.02    0.00  0.08  1.87  1.97  2.02  2.08  2.18 16208    1
## sigma[1]   0.13    0.00  0.02  0.10  0.11  0.12  0.13  0.16 16605    1
## sigma[2]   0.48    0.00  0.06  0.38  0.44  0.48  0.52  0.62 17237    1
## nu        26.98    0.11 14.57  7.63 16.26 24.03 34.49 62.90 19067    1
## rho        0.34    0.00  0.15  0.04  0.25  0.35  0.45  0.60 17965    1
## cov[1,1]   0.02    0.00  0.00  0.01  0.01  0.02  0.02  0.03 15899    1
## cov[1,2]   0.02    0.00  0.01  0.00  0.01  0.02  0.03  0.05 14008    1
## cov[2,1]   0.02    0.00  0.01  0.00  0.01  0.02  0.03  0.05 14008    1
## cov[2,2]   0.24    0.00  0.06  0.14  0.19  0.23  0.27  0.38 16734    1
## x_rand[1]  0.42    0.00  0.13  0.15  0.33  0.42  0.50  0.68 15343    1
## x_rand[2]  2.02    0.00  0.51  1.00  1.69  2.02  2.35  3.05 16368    1
## lp__      46.22    0.02  1.77 41.92 45.28 46.55 47.53 48.66  7784    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:54:31 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatterplot of Richness vs. MI_first with the inferred distribution.
posterior_scatter(Richness_MI_first, MI_first, Richness)

# Diversity ----

# Robust Bayesian correlation: MTLD (lexical diversity) vs. Raw_first;
# 4 chains of 5000 iterations (1000 warmup).
MTLD_Raw_first <- rob.cor.mcmc(
  cbind(MTLD, Raw_first),
  iter = 5000, warmup = 1000, chains = 4
)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.2074678, SD = 0.1576626
## Posterior median and MAD:                  Median = -0.2132759, MAD = 0.1590066
## Rho values with 99% posterior probability: 99% HPDI = [-0.5768855, 0.2157282]
## Rho values with 95% posterior probability: 95% HPDI = [-0.5074492, 0.09887879]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.9011875
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.0988125
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.215625
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.106375
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.4120625
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.1105625
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0025
# Full Stan posterior summary for the MTLD ~ Raw_first model
# (printed rho -0.21 matches the summary above, mean = -0.2074678).
print(MTLD_Raw_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##              mean se_mean    sd    2.5%     25%     50%     75%   97.5% n_eff
## mu[1]       37.07    0.01  1.72   33.69   35.94   37.07   38.21   40.51 20230
## mu[2]        5.98    0.00  0.10    5.79    5.91    5.98    6.04    6.16 18987
## sigma[1]    10.36    0.01  1.31    8.15    9.44   10.24   11.15   13.26 18505
## sigma[2]     0.58    0.00  0.07    0.46    0.53    0.57    0.62    0.74 18734
## nu          27.22    0.10 14.72    7.52   16.42   24.26   34.85   64.00 21474
## rho         -0.21    0.00  0.16   -0.50   -0.32   -0.21   -0.10    0.11 20159
## cov[1,1]   109.02    0.21 28.30   66.35   89.08  104.83  124.36  175.85 17345
## cov[1,2]    -1.28    0.01  1.09   -3.67   -1.91   -1.21   -0.57    0.67 16335
## cov[2,1]    -1.28    0.01  1.09   -3.67   -1.91   -1.21   -0.57    0.67 16335
## cov[2,2]     0.34    0.00  0.09    0.21    0.28    0.33    0.39    0.55 17642
## x_rand[1]   37.02    0.09 11.17   14.79   29.89   37.02   44.18   58.79 16270
## x_rand[2]    5.98    0.00  0.63    4.75    5.58    5.98    6.38    7.23 16096
## lp__      -134.99    0.02  1.79 -139.30 -135.93 -134.66 -133.68 -132.54  7365
##           Rhat
## mu[1]        1
## mu[2]        1
## sigma[1]     1
## sigma[2]     1
## nu           1
## rho          1
## cov[1,1]     1
## cov[1,2]     1
## cov[2,1]     1
## cov[2,2]     1
## x_rand[1]    1
## x_rand[2]    1
## lp__         1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:54:42 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatterplot of MTLD vs. Raw_first with the inferred distribution.
posterior_scatter(MTLD_Raw_first, Raw_first, MTLD)

# Robust Bayesian correlation: MTLD vs. tscore_first;
# 4 chains of 5000 iterations (1000 warmup).
MTLD_tscore_first <- rob.cor.mcmc(
  cbind(MTLD, tscore_first),
  iter = 5000, warmup = 1000, chains = 4
)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.03919154, SD = 0.1616442
## Posterior median and MAD:                  Median = -0.03731191, MAD = 0.1649856
## Rho values with 99% posterior probability: 99% HPDI = [-0.4383311, 0.371957]
## Rho values with 95% posterior probability: 95% HPDI = [-0.3586273, 0.2680975]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.592625
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.407375
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.4440625
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.2366875
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.1360625
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.0138125
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0001875
# Full Stan posterior summary for the MTLD ~ tscore_first model
# (printed rho -0.04 matches the summary above, mean = -0.03919154).
print(MTLD_tscore_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##              mean se_mean    sd    2.5%     25%     50%     75%   97.5% n_eff
## mu[1]       36.88    0.01  1.71   33.49   35.74   36.88   38.01   40.25 20185
## mu[2]        8.60    0.00  0.57    7.49    8.24    8.60    8.97    9.72 21365
## sigma[1]    10.40    0.01  1.31    8.18    9.46   10.27   11.21   13.26 20597
## sigma[2]     3.41    0.00  0.44    2.67    3.10    3.37    3.67    4.37 20334
## nu          26.46    0.09 14.48    7.62   15.89   23.36   33.65   62.61 23538
## rho         -0.04    0.00  0.16   -0.35   -0.15   -0.04    0.07    0.28 20618
## cov[1,1]   109.78    0.20 28.29   66.89   89.48  105.41  125.65  175.75 19290
## cov[1,2]    -1.42    0.05  6.16  -13.92   -5.16   -1.25    2.35   10.65 15753
## cov[2,1]    -1.42    0.05  6.16  -13.92   -5.16   -1.25    2.35   10.65 15753
## cov[2,2]    11.80    0.02  3.11    7.15    9.59   11.36   13.47   19.09 18865
## x_rand[1]   36.97    0.09 11.22   14.34   29.97   37.11   44.16   59.13 15769
## x_rand[2]    8.60    0.03  3.69    1.33    6.26    8.59   10.93   15.87 16046
## lp__      -205.06    0.02  1.77 -209.40 -206.01 -204.72 -203.76 -202.60  6946
##           Rhat
## mu[1]        1
## mu[2]        1
## sigma[1]     1
## sigma[2]     1
## nu           1
## rho          1
## cov[1,1]     1
## cov[1,2]     1
## cov[2,1]     1
## cov[2,2]     1
## x_rand[1]    1
## x_rand[2]    1
## lp__         1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:54:53 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatterplot of MTLD vs. tscore_first with the inferred distribution.
posterior_scatter(MTLD_tscore_first, tscore_first, MTLD)

# Robust Bayesian correlation: MTLD vs. MI_first;
# 4 chains of 5000 iterations (1000 warmup).
MTLD_MI_first <- rob.cor.mcmc(
  cbind(MTLD, MI_first),
  iter = 5000, warmup = 1000, chains = 4
)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.1532119, SD = 0.1582243
## Posterior median and MAD:                  Median = 0.1576082, MAD = 0.1609928
## Rho values with 99% posterior probability: 99% HPDI = [-0.2620053, 0.5301753]
## Rho values with 95% posterior probability: 95% HPDI = [-0.1605513, 0.4554807]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.1685625
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.8314375
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.3030625
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.1524375
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.28225
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.0565
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0008125
# Full Stan posterior summary for the MTLD ~ MI_first model
# (printed rho 0.15 matches the summary above, mean = 0.1532119).
print(MTLD_MI_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##              mean se_mean    sd    2.5%     25%     50%     75%   97.5% n_eff
## mu[1]       36.96    0.01  1.72   33.58   35.84   36.95   38.10   40.34 20277
## mu[2]        2.02    0.00  0.08    1.87    1.97    2.02    2.08    2.18 20461
## sigma[1]    10.39    0.01  1.32    8.17    9.47   10.27   11.20   13.29 19575
## sigma[2]     0.49    0.00  0.06    0.38    0.44    0.48    0.52    0.62 19994
## nu          27.55    0.10 14.54    8.15   16.83   24.65   35.16   62.81 21333
## rho          0.15    0.00  0.16   -0.16    0.05    0.16    0.26    0.45 19988
## cov[1,1]   109.77    0.21 28.50   66.71   89.76  105.46  125.48  176.75 18247
## cov[1,2]     0.80    0.01  0.89   -0.85    0.22    0.76    1.31    2.70 16701
## cov[2,1]     0.80    0.01  0.89   -0.85    0.22    0.76    1.31    2.70 16701
## cov[2,2]     0.24    0.00  0.06    0.15    0.20    0.23    0.27    0.39 18733
## x_rand[1]   37.01    0.09 11.18   14.86   29.93   37.04   44.02   59.21 15940
## x_rand[2]    2.03    0.00  0.52    0.99    1.69    2.03    2.36    3.05 16245
## lp__      -128.36    0.02  1.76 -132.60 -129.30 -128.06 -127.06 -125.93  7412
##           Rhat
## mu[1]        1
## mu[2]        1
## sigma[1]     1
## sigma[2]     1
## nu           1
## rho          1
## cov[1,1]     1
## cov[1,2]     1
## cov[2,1]     1
## cov[2,2]     1
## x_rand[1]    1
## x_rand[2]    1
## lp__         1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:55:04 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatterplot of MTLD vs. MI_first with the inferred distribution.
posterior_scatter(MTLD_MI_first, MI_first, MTLD)

# Frequency ----

# Robust Bayesian correlation: COCA_spoken_Frequency_Log_AW vs. Raw_first;
# 4 chains of 5000 iterations (1000 warmup).
COCA_spoken_Frequency_Log_AW_Raw_first <- rob.cor.mcmc(
  cbind(COCA_spoken_Frequency_Log_AW, Raw_first),
  iter = 5000, warmup = 1000, chains = 4
)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.08567511, SD = 0.1637677
## Posterior median and MAD:                  Median = 0.0893334, MAD = 0.1668686
## Rho values with 99% posterior probability: 99% HPDI = [-0.3269144, 0.4899783]
## Rho values with 95% posterior probability: 95% HPDI = [-0.2313197, 0.4036786]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.3020625
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.6979375
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.3953125
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.2044375
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.1790625
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.02525
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.000375
# Full Stan posterior summary for COCA_spoken_Frequency_Log_AW ~ Raw_first
# (printed rho 0.09 matches the summary above, mean = 0.08567511).
print(COCA_spoken_Frequency_Log_AW_Raw_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      3.16    0.00  0.01  3.14  3.16  3.16  3.17  3.19 18325    1
## mu[2]      5.97    0.00  0.10  5.78  5.90  5.96  6.03  6.16 18253    1
## sigma[1]   0.08    0.00  0.01  0.06  0.08  0.08  0.09  0.11 16479    1
## sigma[2]   0.58    0.00  0.07  0.45  0.52  0.57  0.62  0.74 18095    1
## nu        22.93    0.10 14.17  5.19 12.53 19.60 29.89 58.50 19347    1
## rho        0.09    0.00  0.16 -0.24 -0.03  0.09  0.20  0.40 18205    1
## cov[1,1]   0.01    0.00  0.00  0.00  0.01  0.01  0.01  0.01 15984    1
## cov[1,2]   0.00    0.00  0.01 -0.01  0.00  0.00  0.01  0.02 14780    1
## cov[2,1]   0.00    0.00  0.01 -0.01  0.00  0.00  0.01  0.02 14780    1
## cov[2,2]   0.34    0.00  0.09  0.20  0.27  0.33  0.39  0.55 17216    1
## x_rand[1]  3.16    0.00  0.09  2.98  3.11  3.16  3.22  3.35 15840    1
## x_rand[2]  5.96    0.01  0.64  4.69  5.57  5.97  6.36  7.22 15338    1
## lp__      50.83    0.02  1.78 46.46 49.87 51.15 52.15 53.28  7669    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:55:16 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatterplot of COCA_spoken_Frequency_Log_AW vs. Raw_first with the
# inferred distribution.
posterior_scatter(COCA_spoken_Frequency_Log_AW_Raw_first, Raw_first, COCA_spoken_Frequency_Log_AW)

# Robust Bayesian correlation: COCA_spoken_Frequency_Log_AW vs. tscore_first;
# 4 chains of 5000 iterations (1000 warmup).
COCA_spoken_Frequency_Log_AW_tscore_first <- rob.cor.mcmc(
  cbind(COCA_spoken_Frequency_Log_AW, tscore_first),
  iter = 5000, warmup = 1000, chains = 4
)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.03696373, SD = 0.169242
## Posterior median and MAD:                  Median = 0.04004806, MAD = 0.1738711
## Rho values with 99% posterior probability: 99% HPDI = [-0.3965147, 0.4514541]
## Rho values with 95% posterior probability: 95% HPDI = [-0.3056386, 0.3489206]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.413875
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.586125
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.4248125
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.2146875
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.150875
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.01775
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.000125
# Full Stan posterior summary for COCA_spoken_Frequency_Log_AW ~ tscore_first
# (printed rho 0.04 matches the summary above, mean = 0.03696373).
print(COCA_spoken_Frequency_Log_AW_tscore_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##             mean se_mean    sd   2.5%    25%    50%    75%  97.5% n_eff Rhat
## mu[1]       3.17    0.00  0.01   3.14   3.16   3.17   3.18   3.19 19050    1
## mu[2]       8.63    0.00  0.56   7.53   8.26   8.62   9.00   9.74 20099    1
## sigma[1]    0.08    0.00  0.01   0.06   0.07   0.08   0.09   0.11 16314    1
## sigma[2]    3.32    0.00  0.44   2.56   3.02   3.29   3.59   4.31 18067    1
## nu         20.02    0.10 13.04   4.62  10.61  16.78  25.98  54.00 18416    1
## rho         0.04    0.00  0.17  -0.29  -0.08   0.04   0.16   0.36 17366    1
## cov[1,1]    0.01    0.00  0.00   0.00   0.01   0.01   0.01   0.01 15820    1
## cov[1,2]    0.01    0.00  0.05  -0.09  -0.02   0.01   0.04   0.11 13103    1
## cov[2,1]    0.01    0.00  0.05  -0.09  -0.02   0.01   0.04   0.11 13103    1
## cov[2,2]   11.25    0.02  3.06   6.54   9.12  10.79  12.90  18.55 17545    1
## x_rand[1]   3.17    0.00  0.09   2.98   3.11   3.17   3.22   3.35 16490    1
## x_rand[2]   8.64    0.03  3.67   1.31   6.32   8.63  10.97  15.91 16183    1
## lp__      -18.08    0.02  1.80 -22.40 -19.01 -17.74 -16.76 -15.62  7248    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:55:28 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatterplot of COCA_spoken_Frequency_Log_AW vs. tscore_first with the
# inferred distribution.
posterior_scatter(COCA_spoken_Frequency_Log_AW_tscore_first, tscore_first, COCA_spoken_Frequency_Log_AW)

# Robust Bayesian correlation: COCA_spoken_Frequency_Log_AW vs. MI_first;
# 4 chains of 5000 iterations (1000 warmup).
COCA_spoken_Frequency_Log_AW_MI_first <- rob.cor.mcmc(
  cbind(COCA_spoken_Frequency_Log_AW, MI_first),
  iter = 5000, warmup = 1000, chains = 4
)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.3604089, SD = 0.1481354
## Posterior median and MAD:                  Median = -0.368041, MAD = 0.1478013
## Rho values with 99% posterior probability: 99% HPDI = [-0.6982399, 0.03998982]
## Rho values with 95% posterior probability: 95% HPDI = [-0.6356368, -0.06681188]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.988
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.012
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.0458125
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.0186875
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.7799375
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.414125
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0409375
# Full Stan posterior summary for COCA_spoken_Frequency_Log_AW ~ MI_first
# (printed rho -0.36 matches the summary above, mean = -0.3604089).
print(COCA_spoken_Frequency_Log_AW_MI_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]      3.17    0.00  0.01  3.14  3.16  3.17  3.18  3.19 17997    1
## mu[2]      2.03    0.00  0.08  1.87  1.97  2.03  2.08  2.18 17953    1
## sigma[1]   0.08    0.00  0.01  0.06  0.07  0.08  0.09  0.11 17004    1
## sigma[2]   0.47    0.00  0.06  0.37  0.43  0.47  0.51  0.61 16195    1
## nu        19.90    0.09 12.71  4.96 10.71 16.70 25.65 52.77 19200    1
## rho       -0.36    0.00  0.15 -0.63 -0.47 -0.37 -0.27 -0.05 18851    1
## cov[1,1]   0.01    0.00  0.00  0.00  0.01  0.01  0.01  0.01 16638    1
## cov[1,2]  -0.01    0.00  0.01 -0.03 -0.02 -0.01 -0.01  0.00 14613    1
## cov[2,1]  -0.01    0.00  0.01 -0.03 -0.02 -0.01 -0.01  0.00 14613    1
## cov[2,2]   0.23    0.00  0.06  0.14  0.19  0.22  0.26  0.37 15555    1
## x_rand[1]  3.17    0.00  0.09  2.99  3.11  3.17  3.22  3.35 15258    1
## x_rand[2]  2.03    0.00  0.53  0.99  1.70  2.03  2.36  3.06 16317    1
## lp__      60.75    0.02  1.76 56.41 59.83 61.07 62.03 63.17  7499    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:55:41 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatterplot of COCA_spoken_Frequency_Log_AW vs. MI_first with the
# inferred distribution.
posterior_scatter(COCA_spoken_Frequency_Log_AW_MI_first, MI_first, COCA_spoken_Frequency_Log_AW)

# Range ----

# Robust Bayesian correlation: COCA_spoken_Range_Log_AW vs. Raw_first;
# 4 chains of 5000 iterations (1000 warmup).
COCA_spoken_Range_Log_AW_Raw_first <- rob.cor.mcmc(
  cbind(COCA_spoken_Range_Log_AW, Raw_first),
  iter = 5000, warmup = 1000, chains = 4
)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.1224214, SD = 0.1646321
## Posterior median and MAD:                  Median = 0.1274023, MAD = 0.1658748
## Rho values with 99% posterior probability: 99% HPDI = [-0.3086172, 0.5208941]
## Rho values with 95% posterior probability: 95% HPDI = [-0.212691, 0.4267344]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.229375
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.770625
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.340125
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.1750625
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.23625
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.040625
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.00075
# Full Stan posterior summary for COCA_spoken_Range_Log_AW ~ Raw_first
# (printed rho 0.12 matches the summary above, mean = 0.1224214).
print(COCA_spoken_Range_Log_AW_Raw_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]     -0.38    0.00  0.01 -0.40 -0.39 -0.38 -0.37 -0.36 20609    1
## mu[2]      5.96    0.00  0.10  5.77  5.90  5.96  6.03  6.16 20638    1
## sigma[1]   0.05    0.00  0.01  0.04  0.04  0.05  0.05  0.06 18612    1
## sigma[2]   0.58    0.00  0.07  0.45  0.53  0.57  0.62  0.74 18031    1
## nu        23.21    0.10 14.10  5.59 12.88 19.82 30.11 58.76 19714    1
## rho        0.12    0.00  0.16 -0.21  0.01  0.13  0.24  0.43 20027    1
## cov[1,1]   0.00    0.00  0.00  0.00  0.00  0.00  0.00  0.00 17995    1
## cov[1,2]   0.00    0.00  0.01 -0.01  0.00  0.00  0.01  0.01 15789    1
## cov[2,1]   0.00    0.00  0.01 -0.01  0.00  0.00  0.01  0.01 15789    1
## cov[2,2]   0.34    0.00  0.09  0.20  0.28  0.33  0.39  0.55 17406    1
## x_rand[1] -0.38    0.00  0.05 -0.49 -0.41 -0.38 -0.35 -0.27 15137    1
## x_rand[2]  5.97    0.01  0.64  4.72  5.58  5.97  6.36  7.23 16080    1
## lp__      72.19    0.02  1.82 67.69 71.24 72.54 73.51 74.64  6918    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:55:54 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatter plot of the two variables with the inferred posterior distribution
# overlaid (posterior_scatter() presumably comes from the sourced
# rob.cor.mcmc_additional_criteria.R -- verify against that file).
posterior_scatter(COCA_spoken_Range_Log_AW_Raw_first, Raw_first, COCA_spoken_Range_Log_AW)

# Robust Bayesian correlation between the COCA spoken range index (log, AW)
# and first-response collocation t-scores.
# FIX: use `<-` for top-level assignment instead of `=` (R style convention).
COCA_spoken_Range_Log_AW_tscore_first <- rob.cor.mcmc(cbind(COCA_spoken_Range_Log_AW, tscore_first), iter=5000, warmup=1000, chains=4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = 0.08569614, SD = 0.1691503
## Posterior median and MAD:                  Median = 0.08676981, MAD = 0.1725701
## Rho values with 99% posterior probability: 99% HPDI = [-0.3566329, 0.4961165]
## Rho values with 95% posterior probability: 95% HPDI = [-0.2430071, 0.4128742]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.3108125
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.6891875
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.3899375
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.202375
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.193375
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.0310625
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.0005625
# BUG FIX: this section reports the tscore_first model fitted above, but the
# original printed the Raw_first model again (copy-paste error -- note the
# pasted `##` output below duplicates the Raw_first summary, identical
# timestamp included, and should be regenerated from this corrected call).
print(COCA_spoken_Range_Log_AW_tscore_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]     -0.38    0.00  0.01 -0.40 -0.39 -0.38 -0.37 -0.36 20609    1
## mu[2]      5.96    0.00  0.10  5.77  5.90  5.96  6.03  6.16 20638    1
## sigma[1]   0.05    0.00  0.01  0.04  0.04  0.05  0.05  0.06 18612    1
## sigma[2]   0.58    0.00  0.07  0.45  0.53  0.57  0.62  0.74 18031    1
## nu        23.21    0.10 14.10  5.59 12.88 19.82 30.11 58.76 19714    1
## rho        0.12    0.00  0.16 -0.21  0.01  0.13  0.24  0.43 20027    1
## cov[1,1]   0.00    0.00  0.00  0.00  0.00  0.00  0.00  0.00 17995    1
## cov[1,2]   0.00    0.00  0.01 -0.01  0.00  0.00  0.01  0.01 15789    1
## cov[2,1]   0.00    0.00  0.01 -0.01  0.00  0.00  0.01  0.01 15789    1
## cov[2,2]   0.34    0.00  0.09  0.20  0.28  0.33  0.39  0.55 17406    1
## x_rand[1] -0.38    0.00  0.05 -0.49 -0.41 -0.38 -0.35 -0.27 15137    1
## x_rand[2]  5.97    0.01  0.64  4.72  5.58  5.97  6.36  7.23 16080    1
## lp__      72.19    0.02  1.82 67.69 71.24 72.54 73.51 74.64  6918    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:55:54 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatter plot with the inferred posterior overlaid for the tscore_first fit.
posterior_scatter(COCA_spoken_Range_Log_AW_tscore_first, tscore_first, COCA_spoken_Range_Log_AW)

# Robust Bayesian correlation between the COCA spoken range index (log, AW)
# and first-response mutual-information (MI) scores.
# FIX: use `<-` for top-level assignment instead of `=` (R style convention).
COCA_spoken_Range_Log_AW_MI_first <- rob.cor.mcmc(cbind(COCA_spoken_Range_Log_AW, MI_first), iter=5000, warmup=1000, chains=4)

## POSTERIOR STATISTICS OF RHO
## Posterior mean and standard deviation:     Mean = -0.2792651, SD = 0.157066
## Posterior median and MAD:                  Median = -0.2869979, MAD = 0.1582793
## Rho values with 99% posterior probability: 99% HPDI = [-0.6514037, 0.130684]
## Rho values with 95% posterior probability: 95% HPDI = [-0.5669231, 0.04044759]
## Posterior probability that rho is ≤0 (probability of positive direction):      P(rho ≤ 0) = 0.9551875
## Posterior probability that rho is ≥0 (probability of negative direction):      P(rho ≥ 0) = 0.0448125
## Posterior probability that rho is weak:    P(-0.1 < rho < 0.1) = 0.1186875
## Posterior probability that rho is within ROPE, or % in ROPE:    P(-0.05 < rho < 0.05) = 0.0545
## 
## *Following Plonsky and Oswald’s (2014) benchmark:
## Posterior probability that rho is beyond small:    P(abs(rho) > .25) = 0.5903125
## Posterior probability that rho is beyond medium:   P(abs(rho) > .40) = 0.2291875
## Posterior probability that rho is beyond large:    P(abs(rho) > .60) = 0.01225
# Full Stan summary for the MI_first robust-correlation fit.
print(COCA_spoken_Range_Log_AW_MI_first)
## Inference for Stan model: robust_correlation.
## 4 chains, each with iter=5000; warmup=1000; thin=1; 
## post-warmup draws per chain=4000, total post-warmup draws=16000.
## 
##            mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
## mu[1]     -0.38    0.00  0.01 -0.40 -0.39 -0.38 -0.37 -0.36 17759    1
## mu[2]      2.02    0.00  0.08  1.86  1.97  2.02  2.08  2.18 17712    1
## sigma[1]   0.05    0.00  0.01  0.04  0.04  0.05  0.05  0.06 14775    1
## sigma[2]   0.47    0.00  0.06  0.36  0.43  0.46  0.51  0.61 17192    1
## nu        18.49    0.10 12.61  4.12  9.45 14.98 24.18 50.89 16011    1
## rho       -0.28    0.00  0.16 -0.56 -0.39 -0.29 -0.18  0.05 17528    1
## cov[1,1]   0.00    0.00  0.00  0.00  0.00  0.00  0.00  0.00 14718    1
## cov[1,2]  -0.01    0.00  0.00 -0.02 -0.01 -0.01  0.00  0.00 14166    1
## cov[2,1]  -0.01    0.00  0.00 -0.02 -0.01 -0.01  0.00  0.00 14166    1
## cov[2,2]   0.22    0.00  0.06  0.13  0.18  0.21  0.26  0.37 16467    1
## x_rand[1] -0.38    0.00  0.05 -0.48 -0.41 -0.38 -0.35 -0.27 15929    1
## x_rand[2]  2.02    0.00  0.53  0.96  1.69  2.02  2.35  3.05 15852    1
## lp__      80.95    0.02  1.79 76.57 80.02 81.31 82.27 83.40  6918    1
## 
## Samples were drawn using NUTS(diag_e) at Sun Dec 13 18:56:20 2020.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
# Scatter plot with the inferred posterior overlaid for the MI_first fit.
posterior_scatter(COCA_spoken_Range_Log_AW_MI_first, MI_first, COCA_spoken_Range_Log_AW)

Bayesian regression with brms

# Inspect brms' default priors for the fluency model before fitting.
# (Outcome is unscaled here; the fitted models below standardize both sides.)
get_prior(Fluency ~ Raw_first + scale(IndexCOCA_spoken_Frequency_Log_AW), data = data, 
                   family = gaussian())
##                    prior     class                                   coef group
## 1                                b                                             
## 2                                b                              Raw_first      
## 3                                b scaleIndexCOCA_spoken_Frequency_Log_AW      
## 4 student_t(3, 0.5, 2.5) Intercept                                             
## 5   student_t(3, 0, 2.5)     sigma                                             
##   resp dpar nlpar bound
## 1                      
## 2                      
## 3                      
## 4                      
## 5

Oral Fluency

Fluency rating

# Default priors for the fully standardized fluency model (both outcome and
# predictors scaled), as actually fitted below.
get_prior(scale(Fluency) ~ scale(Raw_first) + scale(IndexCOCA_spoken_Frequency_Log_AW), data = data, 
                   family = gaussian())
##                     prior     class                                   coef
## 1                                 b                                       
## 2                                 b scaleIndexCOCA_spoken_Frequency_Log_AW
## 3                                 b                         scaleRaw_first
## 4 student_t(3, -0.1, 2.5) Intercept                                       
## 5    student_t(3, 0, 2.5)     sigma                                       
##   group resp dpar nlpar bound
## 1                            
## 2                            
## 3                            
## 4                            
## 5
# Eyeball the fluency ~ first-response frequency relationship
# (Raw_first is already on the log scale) with a smoother.
ggplot(data, aes(x = Raw_first, y = Fluency)) +
  geom_point() +
  geom_smooth()

# Null (intercept-only) baseline for scaled fluency ratings.
# save_all_pars = TRUE keeps every parameter's draws (needed for later model
# averaging). FIX: TRUE spelled out instead of the unsafe shorthand T, which
# is an ordinary reassignable variable in R.
fluency0 <- brm(scale(Fluency) ~ 1, data = data,
                family = gaussian(),
                save_all_pars = TRUE,
                iter = iter1, warmup = 1000, chains = 4, seed = 1234)
# Model 1: fluency predicted by first-response collocation frequency (log),
# with the weakly informative prior defined earlier in the script.
fluency1.wp <- brm(scale(Fluency) ~ scale(Raw_first), data = data,
                   family = gaussian(),
                   prior = weak_prior,
                   save_all_pars = TRUE,
                   iter = iter1, warmup = 1000, chains = 4, seed = 1234)

# Posterior summary for fluency Model 1, shown to 5 decimal places.
print(summary(fluency1.wp), digits = 5)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: scale(Fluency) ~ scale(Raw_first) 
##    Data: data (Number of observations: 40) 
## Samples: 4 chains, each with iter = 5000; warmup = 1000; thin = 1;
##          total post-warmup samples = 16000
## 
## Population-Level Effects: 
##                Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## Intercept       0.00133   0.15312 -0.30953  0.30229 0.99989    13926    10520
## scaleRaw_first -0.37623   0.15387 -0.67745 -0.07337 1.00012    14465    10682
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## sigma  0.96352   0.11623  0.76844  1.22631 1.00030    12801    10645
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# Trace and density plots for MCMC diagnostics.
plot(fluency1.wp)

# Bayesian R-squared for Model 1.
bayes_R2(fluency1.wp)
##     Estimate  Est.Error        Q2.5     Q97.5
## R2 0.1490404 0.08776529 0.006286335 0.3294695
# Marginal effect of the predictor on the outcome.
conditional_effects(fluency1.wp)

# Model 2: adds the COCA spoken frequency index as a second predictor.
# FIX: TRUE spelled out instead of the unsafe shorthand T.
fluency2.wp <- brm(scale(Fluency) ~ scale(Raw_first) + scale(IndexCOCA_spoken_Frequency_Log_AW),
                   data = data,
                   family = gaussian(),
                   prior = weak_prior,
                   save_all_pars = TRUE,
                   iter = iter1, warmup = 1000, chains = 4, seed = 1234)

# Posterior summary for fluency Model 2.
print(summary(fluency2.wp), digits = 5)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: scale(Fluency) ~ scale(Raw_first) + scale(IndexCOCA_spoken_Frequency_Log_AW) 
##    Data: data (Number of observations: 40) 
## Samples: 4 chains, each with iter = 5000; warmup = 1000; thin = 1;
##          total post-warmup samples = 16000
## 
## Population-Level Effects: 
##                                        Estimate Est.Error l-95% CI u-95% CI
## Intercept                              -0.00025   0.15062 -0.29991  0.29575
## scaleRaw_first                         -0.27151   0.18083 -0.62698  0.08694
## scaleIndexCOCA_spoken_Frequency_Log_AW -0.19981   0.18001 -0.55025  0.15885
##                                           Rhat Bulk_ESS Tail_ESS
## Intercept                              1.00030    14746    10009
## scaleRaw_first                         0.99995    13902    11152
## scaleIndexCOCA_spoken_Frequency_Log_AW 1.00015    13833    11513
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## sigma  0.95994   0.11544  0.76648  1.21740 1.00034    13378    11252
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# MCMC diagnostics, Bayesian R-squared, and marginal effects for Model 2.
plot(fluency2.wp)

bayes_R2(fluency2.wp)
##     Estimate  Est.Error       Q2.5     Q97.5
## R2 0.1897306 0.08997675 0.02814451 0.3676389
conditional_effects(fluency2.wp)

# LOO/WAIC comparison of the three fluency models (model_comp() is presumably
# a custom helper from the sourced script -- verify; output below lists
# elpd differences relative to the best model, Model_1).
model_comp(fluency0, fluency1.wp, fluency2.wp)
## [[1]]
##            elpd_diff  se_diff  elpd_loo se_elpd_loo    p_loo  se_p_loo    looic
## Model_1     0.000000 0.000000 -55.84954    3.348564 2.329352 0.4639848 111.6991
## Model_2    -0.527218 1.482041 -56.37676    3.290685 3.539986 0.7205831 112.7535
## Null_Model -2.182007 2.288949 -58.03154    3.081919 1.421840 0.2365736 116.0631
##            se_looic
## Model_1    6.697128
## Model_2    6.581370
## Null_Model 6.163839
## 
## [[2]]
##             elpd_diff  se_diff elpd_waic se_elpd_waic   p_waic se_p_waic
## Model_1     0.0000000 0.000000 -55.83033     3.342733 2.310142 0.4565385
## Model_2    -0.4846285 1.471346 -56.31496     3.273152 3.478186 0.6959414
## Null_Model -2.1974237 2.283824 -58.02775     3.080847 1.418047 0.2353892
##                waic  se_waic
## Model_1    111.6607 6.685467
## Model_2    112.6299 6.546303
## Null_Model 116.0555 6.161693
## 
## [[3]]

## 
## [[4]]

# Bayesian stacking weights for the three fluency models (LOO-based).
loo_model_weights(fluency0, fluency1.wp, fluency2.wp, model_names = NULL)
## Method: stacking
## ------
##             weight
## fluency0    0.049 
## fluency1.wp 0.748 
## fluency2.wp 0.203
# Stacking-weighted average of posterior draws from the two predictor models,
# then the 95% interval of the averaged Raw_first coefficient.
fluency_ave <- posterior_average(fluency1.wp, fluency2.wp, weights = "stacking")
quantile(fluency_ave$b_scaleRaw_first, prob = c(.025, .975))
##         2.5%        97.5% 
## -0.664859216 -0.004306449
# FIX: axis-label typo "coefficent" -> "coefficient".
mcmc_areas(fluency_ave, pars = "b_scaleRaw_first", prob = .95) +
  labs(x = "Regression coefficient (std)")

Articulation Rate

# Default priors for the standardized articulation-rate model.
get_prior(scale(ArticulationRate) ~ scale(Raw_first) + scale(tscore_first) + scale(IndexCOCA_spoken_Frequency_Log_AW), data = data, 
                   family = gaussian()
                    )
##                  prior     class                                   coef group
## 1                              b                                             
## 2                              b scaleIndexCOCA_spoken_Frequency_Log_AW      
## 3                              b                         scaleRaw_first      
## 4                              b                      scaletscore_first      
## 5 student_t(3, 0, 2.5) Intercept                                             
## 6 student_t(3, 0, 2.5)     sigma                                             
##   resp dpar nlpar bound
## 1                      
## 2                      
## 3                      
## 4                      
## 5                      
## 6
# BUG FIX: Raw_first was already log-transformed at the top of the script
# (data$Raw_first <- log(data$Raw_first)), so log(Raw_first) here applied the
# log twice. Plot the already-log-scaled variable directly, matching the
# fluency plot and the scale(Raw_first) terms used in the models.
ggplot(data = data, aes(x = Raw_first, y = ArticulationRate)) +
  geom_point() +
  geom_smooth()

# Baseline (intercept-only) model for scaled articulation rate.
ArtRate0 <- brm(
  scale(ArticulationRate) ~ 1,
  data = data, family = gaussian(),
  iter = iter1, warmup = 1000, chains = 4, seed = 1234
)
# Articulation rate regressed on first-response collocation frequency (log).
ArtRate1 <- brm(
  scale(ArticulationRate) ~ scale(Raw_first),
  data = data, family = gaussian(), prior = weak_prior,
  iter = iter1, warmup = 1000, chains = 4, seed = 1234
)

# Posterior summary for articulation-rate Model 1.
print(summary(ArtRate1), digits = 5)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: scale(ArticulationRate) ~ scale(Raw_first) 
##    Data: data (Number of observations: 40) 
## Samples: 4 chains, each with iter = 5000; warmup = 1000; thin = 1;
##          total post-warmup samples = 16000
## 
## Population-Level Effects: 
##                Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## Intercept       0.00009   0.15379 -0.30246  0.29807 1.00003    13309    10411
## scaleRaw_first -0.32349   0.15719 -0.63269 -0.00992 1.00008    15489    11842
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## sigma  0.98480   0.11808  0.78712  1.24176 1.00008    13193    11424
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# MCMC diagnostics, Bayesian R-squared, and marginal effects for Model 1.
plot(ArtRate1)

bayes_R2(ArtRate1)
##     Estimate  Est.Error        Q2.5     Q97.5
## R2 0.1157661 0.07958506 0.001728084 0.2904245
conditional_effects(ArtRate1)

# Eyeball articulation rate against first-response t-scores with a smoother.
ggplot(data = data, aes(x = tscore_first, y = ArticulationRate)) +
  geom_point() +
  geom_smooth()

# Model 2: adds the COCA spoken frequency index as a second predictor.
ArtRate2 <- brm(
  scale(ArticulationRate) ~ scale(Raw_first) + scale(IndexCOCA_spoken_Frequency_Log_AW),
  data = data, family = gaussian(), prior = weak_prior,
  iter = iter1, warmup = 1000, chains = 4, seed = 1234
)

# Posterior summary for articulation-rate Model 2.
print(summary(ArtRate2), digits = 5)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: scale(ArticulationRate) ~ scale(Raw_first) + scale(IndexCOCA_spoken_Frequency_Log_AW) 
##    Data: data (Number of observations: 40) 
## Samples: 4 chains, each with iter = 5000; warmup = 1000; thin = 1;
##          total post-warmup samples = 16000
## 
## Population-Level Effects: 
##                                        Estimate Est.Error l-95% CI u-95% CI
## Intercept                              -0.00073   0.15834 -0.30771  0.31118
## scaleRaw_first                         -0.25761   0.18304 -0.62123  0.09592
## scaleIndexCOCA_spoken_Frequency_Log_AW -0.12533   0.18474 -0.49038  0.23835
##                                           Rhat Bulk_ESS Tail_ESS
## Intercept                              1.00021    14891    11008
## scaleRaw_first                         1.00007    13819    11335
## scaleIndexCOCA_spoken_Frequency_Log_AW 0.99988    12754    11538
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## sigma  0.98998   0.11973  0.78986  1.25491 1.00023    13827    10831
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# MCMC diagnostics, Bayesian R-squared, and marginal effects for Model 2.
plot(ArtRate2)

bayes_R2(ArtRate2)
##     Estimate  Est.Error       Q2.5     Q97.5
## R2 0.1418902 0.08181481 0.01174893 0.3156237
conditional_effects(ArtRate2)

# LOO/WAIC comparison of the three articulation-rate models
# (model_comp() is presumably a sourced helper -- verify).
model_comp(ArtRate0, ArtRate1, ArtRate2)
## [[1]]
##             elpd_diff   se_diff  elpd_loo se_elpd_loo    p_loo  se_p_loo
## Model_1     0.0000000 0.0000000 -57.05483    4.013256 2.916145 0.6900494
## Model_2    -0.8417713 0.7124457 -57.89660    4.063434 3.843419 0.8495668
## Null_Model -1.2410619 2.3108760 -58.29590    4.493810 1.876090 0.5849525
##               looic se_looic
## Model_1    114.1097 8.026512
## Model_2    115.7932 8.126868
## Null_Model 116.5918 8.987621
## 
## [[2]]
##             elpd_diff   se_diff elpd_waic se_elpd_waic   p_waic se_p_waic
## Model_1     0.0000000 0.0000000 -57.01941     4.003132 2.880722 0.6777534
## Model_2    -0.8029299 0.7090032 -57.82234     4.043105 3.769154 0.8254196
## Null_Model -1.2640034 2.3052202 -58.28341     4.487365 1.863608 0.5775197
##                waic  se_waic
## Model_1    114.0388 8.006264
## Model_2    115.6447 8.086210
## Null_Model 116.5668 8.974730
## 
## [[3]]

## 
## [[4]]

# Bayesian stacking weights for the three articulation-rate models.
loo_model_weights(ArtRate0, ArtRate1, ArtRate2, model_names = NULL)
## Method: stacking
## ------
##          weight
## ArtRate0 0.246 
## ArtRate1 0.754 
## ArtRate2 0.000
# Stacking-weighted average of posterior draws from the two predictor models,
# then the 95% interval of the averaged Raw_first coefficient.
art_rate_ave <- posterior_average(ArtRate1, ArtRate2, weights = "stacking")
quantile(art_rate_ave$b_scaleRaw_first, prob = c(.025, .975))
##         2.5%        97.5% 
## -0.632694066 -0.009917758
# FIX: axis-label typo "coefficent" -> "coefficient".
mcmc_areas(art_rate_ave, pars = "b_scaleRaw_first", prob = .95) +
  labs(x = "Regression coefficient (std)")

Silent pauses

# Default priors for the standardized silent-pause model.
get_prior(scale(SilentPause) ~ scale(Raw_first) + scale(tscore_first) + scale(IndexCOCA_spoken_Frequency_Log_AW), data = data, 
                   family = gaussian())
##                     prior     class                                   coef
## 1                                 b                                       
## 2                                 b scaleIndexCOCA_spoken_Frequency_Log_AW
## 3                                 b                         scaleRaw_first
## 4                                 b                      scaletscore_first
## 5 student_t(3, -0.2, 2.5) Intercept                                       
## 6    student_t(3, 0, 2.5)     sigma                                       
##   group resp dpar nlpar bound
## 1                            
## 2                            
## 3                            
## 4                            
## 5                            
## 6
# Baseline (intercept-only) model for scaled silent-pause counts.
silent0 <- brm(
  scale(SilentPause) ~ 1,
  data = data, family = gaussian(),
  iter = iter1, warmup = 1000, chains = 4, seed = 1234
)
# Silent pauses regressed on first-response collocation frequency (log).
silent1 <- brm(
  scale(SilentPause) ~ scale(Raw_first),
  data = data, family = gaussian(), prior = weak_prior,
  iter = iter1, warmup = 1000, chains = 4, seed = 1234
)

# Posterior summary for silent-pause Model 1.
print(summary(silent1), digits = 5)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: scale(SilentPause) ~ scale(Raw_first) 
##    Data: data (Number of observations: 40) 
## Samples: 4 chains, each with iter = 5000; warmup = 1000; thin = 1;
##          total post-warmup samples = 16000
## 
## Population-Level Effects: 
##                Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## Intercept      -0.00198   0.15494 -0.31139  0.30206 1.00022    14134    10491
## scaleRaw_first  0.34628   0.15620  0.04019  0.65551 1.00039    14131    11384
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## sigma  0.97562   0.11535  0.78213  1.23429 1.00023    12781    10127
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# MCMC diagnostics, Bayesian R-squared, and marginal effects for Model 1.
plot(silent1)

bayes_R2(silent1)
##     Estimate  Est.Error        Q2.5    Q97.5
## R2 0.1293257 0.08345256 0.003133629 0.308794
conditional_effects(silent1)

# Model 2: adds the COCA spoken frequency index as a second predictor.
silent2 <- brm(
  scale(SilentPause) ~ scale(Raw_first) + scale(IndexCOCA_spoken_Frequency_Log_AW),
  data = data, family = gaussian(), prior = weak_prior,
  iter = iter1, warmup = 1000, chains = 4, seed = 1234
)

# Posterior summary for silent-pause Model 2.
print(summary(silent2), digits = 5)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: scale(SilentPause) ~ scale(Raw_first) + scale(IndexCOCA_spoken_Frequency_Log_AW) 
##    Data: data (Number of observations: 40) 
## Samples: 4 chains, each with iter = 5000; warmup = 1000; thin = 1;
##          total post-warmup samples = 16000
## 
## Population-Level Effects: 
##                                        Estimate Est.Error l-95% CI u-95% CI
## Intercept                              -0.00319   0.15432 -0.30441  0.30084
## scaleRaw_first                          0.38872   0.18371  0.02500  0.74222
## scaleIndexCOCA_spoken_Frequency_Log_AW -0.08082   0.18419 -0.44438  0.28142
##                                           Rhat Bulk_ESS Tail_ESS
## Intercept                              1.00016    15528    10989
## scaleRaw_first                         1.00016    13399    12185
## scaleIndexCOCA_spoken_Frequency_Log_AW 1.00005    14162    11463
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## sigma  0.98625   0.11924  0.78583  1.25479 1.00020    13105    11159
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# MCMC diagnostics, Bayesian R-squared, and marginal effects for Model 2.
plot(silent2)

bayes_R2(silent2)
##     Estimate  Est.Error      Q2.5     Q97.5
## R2 0.1492143 0.08320292 0.0129555 0.3213162
conditional_effects(silent2)

# LOO/WAIC comparison of the three silent-pause models
# (model_comp() is presumably a sourced helper -- verify).
model_comp(silent0, silent1, silent2)
## [[1]]
##            elpd_diff  se_diff  elpd_loo se_elpd_loo    p_loo  se_p_loo    looic
## Model_1     0.000000 0.000000 -56.55099    4.054563 2.644356 0.6887362 113.1020
## Model_2    -1.049256 0.600790 -57.60024    4.030346 3.745541 0.9388344 115.2005
## Null_Model -1.743217 2.062665 -58.29421    4.422236 1.853529 0.6492143 116.5884
##            se_looic
## Model_1    8.109126
## Model_2    8.060692
## Null_Model 8.844472
## 
## [[2]]
##            elpd_diff   se_diff elpd_waic se_elpd_waic   p_waic se_p_waic
## Model_1     0.000000 0.0000000 -56.51679     4.039883 2.610157 0.6722935
## Model_2    -1.020433 0.5984865 -57.53722     4.007394 3.682519 0.9144548
## Null_Model -1.757910 2.0606844 -58.27470     4.409239 1.834023 0.6336745
##                waic  se_waic
## Model_1    113.0336 8.079766
## Model_2    115.0744 8.014787
## Null_Model 116.5494 8.818479
## 
## [[3]]

## 
## [[4]]

Bayesian stacking

# Bayesian stacking weights for the three silent-pause models.
loo_model_weights(silent0, silent1, silent2, model_names = NULL)
## Method: stacking
## ------
##         weight
## silent0 0.078 
## silent1 0.922 
## silent2 0.000

Averaged posterior

# Stacking-weighted average of posterior draws from the two predictor models,
# then the 95% interval of the averaged Raw_first coefficient.
silent_ave <- posterior_average(silent1, silent2, weights = "stacking")
quantile(silent_ave$b_scaleRaw_first, prob = c(.025, .975))
##       2.5%      97.5% 
## 0.04018646 0.65551027
# FIX: axis-label typo "coefficent" -> "coefficient".
mcmc_areas(silent_ave, pars = "b_scaleRaw_first", prob = .95) +
  labs(x = "Regression coefficient (std)")

Filled pauses

# Default priors for the standardized filled-pause model.
# Note this outcome uses overall frequency (Rawfreq), not Raw_first.
get_prior(scale(FilledPause) ~ scale(Rawfreq) + scale(IndexCOCA_spoken_Frequency_Log_AW), data = data, 
                   family = gaussian())
##                     prior     class                                   coef
## 1                                 b                                       
## 2                                 b scaleIndexCOCA_spoken_Frequency_Log_AW
## 3                                 b                           scaleRawfreq
## 4 student_t(3, -0.4, 2.5) Intercept                                       
## 5    student_t(3, 0, 2.5)     sigma                                       
##   group resp dpar nlpar bound
## 1                            
## 2                            
## 3                            
## 4                            
## 5
# Baseline (intercept-only) model for scaled filled-pause counts.
FilledPause0 <- brm(
  scale(FilledPause) ~ 1,
  data = data, family = gaussian(),
  iter = iter1, warmup = 1000, chains = 4, seed = 1234
)
# Filled pauses regressed on overall collocation frequency (log).
FilledPause1 <- brm(
  scale(FilledPause) ~ scale(Rawfreq),
  data = data, family = gaussian(), prior = weak_prior,
  iter = iter1, warmup = 1000, chains = 4, seed = 1234
)


# Posterior summary for filled-pause Model 1.
print(summary(FilledPause1), digits = 5)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: scale(FilledPause) ~ scale(Rawfreq) 
##    Data: data (Number of observations: 40) 
## Samples: 4 chains, each with iter = 5000; warmup = 1000; thin = 1;
##          total post-warmup samples = 16000
## 
## Population-Level Effects: 
##              Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## Intercept    -0.00199   0.15889 -0.31661  0.31154 1.00042    14265    11318
## scaleRawfreq -0.29118   0.15771 -0.59954  0.01948 0.99994    14852    11245
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## sigma  0.99700   0.11887  0.80014  1.26327 1.00013    14585    11828
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# MCMC diagnostics, Bayesian R-squared, and marginal effects for Model 1.
plot(FilledPause1)

bayes_R2(FilledPause1)
##      Estimate  Est.Error         Q2.5     Q97.5
## R2 0.09764099 0.07386499 0.0007376389 0.2642808
conditional_effects(FilledPause1)

# Model 2: adds the COCA spoken frequency index as a second predictor.
FilledPause2 <- brm(
  scale(FilledPause) ~ scale(Rawfreq) + scale(IndexCOCA_spoken_Frequency_Log_AW),
  data = data, family = gaussian(), prior = weak_prior,
  iter = iter1, warmup = 1000, chains = 4, seed = 1234
)

# Posterior summary for filled-pause Model 2.
print(summary(FilledPause2), digits = 5)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: scale(FilledPause) ~ scale(Rawfreq) + scale(IndexCOCA_spoken_Frequency_Log_AW) 
##    Data: data (Number of observations: 40) 
## Samples: 4 chains, each with iter = 5000; warmup = 1000; thin = 1;
##          total post-warmup samples = 16000
## 
## Population-Level Effects: 
##                                        Estimate Est.Error l-95% CI u-95% CI
## Intercept                              -0.00196   0.16248 -0.32379  0.31748
## scaleRawfreq                           -0.29053   0.18229 -0.65280  0.06313
## scaleIndexCOCA_spoken_Frequency_Log_AW  0.00577   0.18455 -0.35764  0.37398
##                                           Rhat Bulk_ESS Tail_ESS
## Intercept                              0.99989    13670    10435
## scaleRawfreq                           1.00005    14010    11784
## scaleIndexCOCA_spoken_Frequency_Log_AW 1.00023    14376    12051
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## sigma  1.01079   0.12204  0.80642  1.28420 1.00027    13410    11329
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# MCMC diagnostics, Bayesian R-squared, and marginal effects for Model 2.
plot(FilledPause2)

bayes_R2(FilledPause2)
##     Estimate  Est.Error        Q2.5     Q97.5
## R2 0.1134629 0.07321546 0.006932863 0.2750717
conditional_effects(FilledPause2)

# LOO/WAIC comparison of the three filled-pause models; note the null model
# outranks Model_2 here (see output below).
model_comp(FilledPause0, FilledPause1, FilledPause2)
## [[1]]
##             elpd_diff   se_diff  elpd_loo se_elpd_loo    p_loo  se_p_loo
## Model_1     0.0000000 0.0000000 -57.84599    5.566585 3.383664 1.2370443
## Null_Model -0.3828682 2.4654512 -58.22886    4.255693 1.774056 0.4944716
## Model_2    -1.1206192 0.1919768 -58.96661    5.584871 4.321973 1.4838764
##               looic  se_looic
## Model_1    115.6920 11.133170
## Null_Model 116.4577  8.511385
## Model_2    117.9332 11.169743
## 
## [[2]]
##             elpd_diff  se_diff elpd_waic se_elpd_waic   p_waic se_p_waic
## Model_1     0.0000000 0.000000 -57.77300     5.524608 3.310672 1.1923489
## Null_Model -0.4443288 2.433204 -58.21733     4.250337 1.762525 0.4884505
## Model_2    -1.0536548 0.178447 -58.82666     5.515259 4.182016 1.4093829
##                waic   se_waic
## Model_1    115.5460 11.049216
## Null_Model 116.4347  8.500673
## Model_2    117.6533 11.030519
## 
## [[3]]

## 
## [[4]]

Bayesian stacking

# Bayesian stacking weights for the three filled-pause models.
loo_model_weights(FilledPause0, FilledPause1, FilledPause2, model_names = NULL)
## Method: stacking
## ------
##              weight
## FilledPause0 0.369 
## FilledPause1 0.631 
## FilledPause2 0.000

Averaged posterior

# Stacking-weighted average of posterior draws from the two predictor models,
# then the 95% interval of the averaged Rawfreq coefficient.
filled_ave <- posterior_average(FilledPause1, FilledPause2, weights = "stacking")
quantile(filled_ave$b_scaleRawfreq, prob = c(.025, .975))
##        2.5%       97.5% 
## -0.59954064  0.01948036
# FIX: axis-label typo "coefficent" -> "coefficient".
mcmc_areas(filled_ave, pars = "b_scaleRawfreq", prob = .95) +
  labs(x = "Regression coefficient (std)")

Lexis

Richness

# Default priors for the standardized lexical-richness model.
get_prior(scale(Richness) ~  scale(MI_first) + scale(Rawfreq) + scale(IndexCOCA_spoken_Frequency_Log_AW), data = data, family = gaussian())
##                    prior     class                                   coef group
## 1                                b                                             
## 2                                b scaleIndexCOCA_spoken_Frequency_Log_AW      
## 3                                b                          scaleMI_first      
## 4                                b                           scaleRawfreq      
## 5 student_t(3, 0.2, 2.5) Intercept                                             
## 6   student_t(3, 0, 2.5)     sigma                                             
##   resp dpar nlpar bound
## 1                      
## 2                      
## 3                      
## 4                      
## 5                      
## 6
# Baseline (intercept-only) model for scaled lexical richness.
Richness0 <- brm(
  scale(Richness) ~ 1,
  data = data, family = gaussian(),
  iter = iter1, warmup = 1000, chains = 4, seed = 1234
)
# Richness regressed on first-response mutual-information scores.
Richness1 <- brm(
  scale(Richness) ~ scale(MI_first),
  data = data, family = gaussian(), prior = weak_prior,
  iter = iter1, warmup = 1000, chains = 4, seed = 1234
)


# Posterior summary for richness Model 1.
print(summary(Richness1), digits = 5)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: scale(Richness) ~ scale(MI_first) 
##    Data: data (Number of observations: 40) 
## Samples: 4 chains, each with iter = 5000; warmup = 1000; thin = 1;
##          total post-warmup samples = 16000
## 
## Population-Level Effects: 
##               Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## Intercept      0.00016   0.15367 -0.30599  0.29952 1.00002    15782    11232
## scaleMI_first  0.35380   0.15315  0.05313  0.65514 1.00020    13973    10481
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## sigma  0.97205   0.11429  0.78056  1.22605 1.00018    13921    10483
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
plot(Richness1)

bayes_R2(Richness1)
##    Estimate  Est.Error        Q2.5     Q97.5
## R2 0.133816 0.08371075 0.004042428 0.3106514
conditional_effects(Richness1)

# Model 2: add the learner-level frequency index as a second predictor.
# FIX: `seed = 1234` added for reproducibility — every other brm() fit in this
# script sets it, and the original call omitted it.
Richness2 <- brm(scale(Richness) ~  scale(MI_first) + scale(IndexCOCA_spoken_Frequency_Log_AW), data = data, 
                   family = gaussian(),
                    prior = weak_prior,
                    iter = iter1, warmup = 1000, chains = 4, seed = 1234)


# Recorded posterior summary for the two-predictor richness model.
print(summary(Richness2), digits = 5)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: scale(Richness) ~ scale(MI_first) + scale(IndexCOCA_spoken_Frequency_Log_AW) 
##    Data: data (Number of observations: 40) 
## Samples: 4 chains, each with iter = 5000; warmup = 1000; thin = 1;
##          total post-warmup samples = 16000
## 
## Population-Level Effects: 
##                                        Estimate Est.Error l-95% CI u-95% CI
## Intercept                               0.00158   0.14841 -0.29019  0.29214
## scaleMI_first                           0.32033   0.14890  0.02482  0.61188
## scaleIndexCOCA_spoken_Frequency_Log_AW -0.29063   0.14946 -0.58401  0.00193
##                                           Rhat Bulk_ESS Tail_ESS
## Intercept                              1.00009    15712    10977
## scaleMI_first                          1.00018    16705    11969
## scaleIndexCOCA_spoken_Frequency_Log_AW 1.00017    16407    11967
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## sigma  0.93674   0.11273  0.74586  1.18489 1.00036    14428    11986
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# Convergence diagnostics and effect visualisation.
plot(Richness2)

bayes_R2(Richness2)
##     Estimate  Est.Error       Q2.5     Q97.5
## R2 0.2226223 0.09351495 0.04359286 0.3995478
conditional_effects(Richness2)

# LOO and WAIC comparison of the null, one- and two-predictor models.
# model_comp() is defined in rob.cor.mcmc_additional_criteria.R (sourced at
# the top of this script); elements [[3]] and [[4]] are plots.
model_comp(Richness0, Richness1, Richness2)
## [[1]]
##            elpd_diff  se_diff  elpd_loo se_elpd_loo    p_loo  se_p_loo    looic
## Model_2     0.000000 0.000000 -55.10282    3.808640 3.077318 0.6817186 110.2056
## Model_1    -1.115905 1.896598 -56.21873    3.708801 2.341153 0.4944896 112.4375
## Null_Model -2.949545 2.935685 -58.05237    3.287924 1.471525 0.2569668 116.1047
##            se_looic
## Model_2    7.617280
## Model_1    7.417602
## Null_Model 6.575848
## 
## [[2]]
##            elpd_diff  se_diff elpd_waic se_elpd_waic   p_waic se_p_waic
## Model_2     0.000000 0.000000 -55.06358     3.795063 3.038071 0.6666369
## Model_1    -1.135639 1.891523 -56.19922     3.702258 2.321640 0.4870916
## Null_Model -2.983141 2.928124 -58.04672     3.286238 1.465874 0.2551602
##                waic  se_waic
## Model_2    110.1272 7.590125
## Model_1    112.3984 7.404516
## Null_Model 116.0934 6.572476
## 
## [[3]]

## 
## [[4]]
### Bayesian stacking

# Stacking weights over the three richness models (Yao et al. 2018).
loo_model_weights(Richness0, Richness1, Richness2, model_names = NULL)
## Method: stacking
## ------
##           weight
## Richness0 0.137 
## Richness1 0.000 
## Richness2 0.863

Averaged posterior

# Stacking-weighted average of the posteriors of the two non-null richness
# models (weights are recomputed over just these two models).
lexis_ave <- posterior_average(Richness1, Richness2, weights = "stacking")
# FIX: quantile()'s argument is `probs` (was `prob` via partial matching).
quantile(lexis_ave$b_scaleMI_first, probs = c(.025, .975))
##      2.5%     97.5% 
## 0.0277728 0.6167806
# (Typo "coefficent" in the axis label corrected.)
mcmc_areas(lexis_ave, pars = "b_scaleMI_first", prob = .95) +
  labs(x = "Regression coefficient (std)")

Frequency

# ----- Lexical frequency: default priors and single-predictor models -----
get_prior(scale(COCA_spoken_Frequency_Log_AW) ~ scale(MI) + scale(IndexCOCA_spoken_Frequency_Log_AW), data = data,  family = gaussian())
##                    prior     class                                   coef group
## 1                                b                                             
## 2                                b scaleIndexCOCA_spoken_Frequency_Log_AW      
## 3                                b                                scaleMI      
## 4 student_t(3, 0.1, 2.5) Intercept                                             
## 5   student_t(3, 0, 2.5)     sigma                                             
##   resp dpar nlpar bound
## 1                      
## 2                      
## 3                      
## 4                      
## 5
# Two identically specified null models are fitted under different names so
# that the "first response" and "total response" comparisons below each have
# their own baseline object.
freq_first0 <- brm(scale(COCA_spoken_Frequency_Log_AW) ~ 1, data = data, 
                   family = gaussian(),
                    iter = iter1, warmup = 1000, chains = 4, seed =1234)
freq_total0 <- brm(scale(COCA_spoken_Frequency_Log_AW) ~ 1 , data = data, 
                   family = gaussian(),
                    iter = iter1, warmup = 1000, chains = 4, seed =1234)
# Model 1 (first responses): MI_first as sole predictor.
freq_first1 <- brm(scale(COCA_spoken_Frequency_Log_AW) ~ scale(MI_first) , data = data, 
                   family = gaussian(),
                    prior = weak_prior,
                    iter = iter1, warmup = 1000, chains = 4, seed =1234)

print(summary(freq_first1), digits = 5)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: scale(COCA_spoken_Frequency_Log_AW) ~ scale(MI_first) 
##    Data: data (Number of observations: 40) 
## Samples: 4 chains, each with iter = 5000; warmup = 1000; thin = 1;
##          total post-warmup samples = 16000
## 
## Population-Level Effects: 
##               Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## Intercept      0.00226   0.15577 -0.30047  0.30766 1.00030    14226    11161
## scaleMI_first -0.32890   0.15700 -0.63796 -0.01812 1.00033    14454    11200
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## sigma  0.98269   0.11760  0.78317  1.24420 0.99990    12395    11725
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
plot(freq_first1)

bayes_R2(freq_first1)
##     Estimate  Est.Error       Q2.5     Q97.5
## R2 0.1189594 0.08090103 0.00194713 0.2950063
conditional_effects(freq_first1)

# Model 1 (all responses): MI over all responses as sole predictor.
freq_total1 <- brm(scale(COCA_spoken_Frequency_Log_AW) ~ scale(MI) , data = data, 
                   family = gaussian(),
                    prior = weak_prior,
                    iter = iter1, warmup = 1000, chains = 4, seed =1234)

print(summary(freq_total1), digits = 5)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: scale(COCA_spoken_Frequency_Log_AW) ~ scale(MI) 
##    Data: data (Number of observations: 40) 
## Samples: 4 chains, each with iter = 5000; warmup = 1000; thin = 1;
##          total post-warmup samples = 16000
## 
## Population-Level Effects: 
##           Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## Intercept  0.00124   0.15679 -0.30896  0.30922 1.00015    14866    10654
## scaleMI   -0.31906   0.15995 -0.63854 -0.00514 1.00072    14089    11442
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## sigma  0.98698   0.11877  0.78851  1.25016 1.00012    13754    11128
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
plot(freq_total1)

bayes_R2(freq_total1)
##     Estimate Est.Error        Q2.5     Q97.5
## R2 0.1137115 0.0803816 0.001334179 0.2931654
conditional_effects(freq_total1)

# Model 2 (first responses): add the learner-level frequency index.
freq_first2 <- brm(scale(COCA_spoken_Frequency_Log_AW) ~ scale(MI_first) + scale(IndexCOCA_spoken_Frequency_Log_AW) , data = data, 
                   family = gaussian(),
                    prior = weak_prior,
                    iter = iter1, warmup = 1000, chains = 4, seed =1234)

print(summary(freq_first2), digits = 5)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: scale(COCA_spoken_Frequency_Log_AW) ~ scale(MI_first) + scale(IndexCOCA_spoken_Frequency_Log_AW) 
##    Data: data (Number of observations: 40) 
## Samples: 4 chains, each with iter = 5000; warmup = 1000; thin = 1;
##          total post-warmup samples = 16000
## 
## Population-Level Effects: 
##                                        Estimate Est.Error l-95% CI u-95% CI
## Intercept                              -0.00055   0.14500 -0.29324  0.28609
## scaleMI_first                          -0.28960   0.14667 -0.57818 -0.00182
## scaleIndexCOCA_spoken_Frequency_Log_AW  0.35879   0.14761  0.06758  0.64484
##                                           Rhat Bulk_ESS Tail_ESS
## Intercept                              1.00052    16627    11206
## scaleMI_first                          1.00020    16786    11897
## scaleIndexCOCA_spoken_Frequency_Log_AW 1.00020    16811    11323
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## sigma  0.91792   0.11336  0.72988  1.16895 1.00012    14758    11183
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
plot(freq_first2)

conditional_effects(freq_first2)

bayes_R2(freq_first2)
##     Estimate  Est.Error       Q2.5     Q97.5
## R2 0.2483606 0.09531583 0.06089122 0.4249321
# Model 2 (all responses): MI plus the learner-level frequency index.
freq_total2 <- brm(scale(COCA_spoken_Frequency_Log_AW) ~ scale(MI) + scale(IndexCOCA_spoken_Frequency_Log_AW), data = data, 
                   family = gaussian(),
                    prior = weak_prior,
                    iter = iter1, warmup = 1000, chains = 4, seed =1234)

print(summary(freq_total2), digits = 5)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: scale(COCA_spoken_Frequency_Log_AW) ~ scale(MI) + scale(IndexCOCA_spoken_Frequency_Log_AW) 
##    Data: data (Number of observations: 40) 
## Samples: 4 chains, each with iter = 5000; warmup = 1000; thin = 1;
##          total post-warmup samples = 16000
## 
## Population-Level Effects: 
##                                        Estimate Est.Error l-95% CI u-95% CI
## Intercept                              -0.00073   0.14962 -0.29472  0.29264
## scaleMI                                -0.22574   0.15792 -0.53732  0.08636
## scaleIndexCOCA_spoken_Frequency_Log_AW  0.32668   0.15660  0.01652  0.62886
##                                           Rhat Bulk_ESS Tail_ESS
## Intercept                              1.00043    16412    11589
## scaleMI                                1.00011    15052    11056
## scaleIndexCOCA_spoken_Frequency_Log_AW 1.00014    15466    11744
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## sigma  0.93931   0.11214  0.75097  1.19175 0.99997    15055    11395
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
plot(freq_total2)

bayes_R2(freq_total2)
##     Estimate  Est.Error       Q2.5     Q97.5
## R2 0.2170681 0.09273908 0.04170135 0.3916281
conditional_effects(freq_total2)

# LOO/WAIC comparison for the first-response frequency models.
model_comp(freq_first0, freq_first1, freq_first2)
## [[1]]
##            elpd_diff  se_diff  elpd_loo se_elpd_loo    p_loo  se_p_loo    looic
## Model_2     0.000000 0.000000 -55.46093    6.069648 4.842106 1.9653494 110.9219
## Model_1    -2.148586 2.700473 -57.60951    7.053405 3.893989 1.9614126 115.2190
## Null_Model -3.140519 2.769052 -58.60145    5.523183 2.348885 0.8977871 117.2029
##            se_looic
## Model_2    12.13930
## Model_1    14.10681
## Null_Model 11.04637
## 
## [[2]]
##            elpd_diff  se_diff elpd_waic se_elpd_waic   p_waic se_p_waic
## Model_2     0.000000 0.000000 -55.28399     5.960505 4.665168 1.8440044
## Model_1    -2.244599 2.716633 -57.52859     6.995822 3.813064 1.8991506
## Null_Model -3.288145 2.727782 -58.57214     5.506132 2.319574 0.8782678
##                waic  se_waic
## Model_2    110.5680 11.92101
## Model_1    115.0572 13.99164
## Null_Model 117.1443 11.01226
## 
## [[3]]

## 
## [[4]]

### Bayesian stacking

# Stacking weights over the first-response frequency models.
loo_model_weights(freq_first0, freq_first1, freq_first2)
## Method: stacking
## ------
##             weight
## freq_first0 0.000 
## freq_first1 0.177 
## freq_first2 0.823

Averaged posterior

# Stacking-weighted average posterior over the two non-null first-response
# frequency models.
freq_ave <- posterior_average(freq_first1, freq_first2, weights = "stacking")
# FIX: quantile()'s argument is `probs` (was `prob` via partial matching).
quantile(freq_ave$b_scaleMI_first, probs = c(.025, .975))
##         2.5%        97.5% 
## -0.592263176 -0.002254059
# (Typo "coefficent" in the axis label corrected.)
mcmc_areas(freq_ave, pars = "b_scaleMI_first", prob = .95) +
  labs(x = "Regression coefficient (std)")

# LOO/WAIC comparison for the all-response frequency models.
model_comp(freq_total0, freq_total1, freq_total2)
## [[1]]
##            elpd_diff  se_diff  elpd_loo se_elpd_loo    p_loo  se_p_loo    looic
## Model_2     0.000000 0.000000 -56.18234    5.680600 4.513436 1.6633735 112.3647
## Model_1    -1.289846 2.617130 -57.47218    6.409359 3.449622 1.5875915 114.9444
## Null_Model -2.419109 2.711988 -58.60145    5.523183 2.348885 0.8977871 117.2029
##            se_looic
## Model_2    11.36120
## Model_1    12.81872
## Null_Model 11.04637
## 
## [[2]]
##            elpd_diff  se_diff elpd_waic se_elpd_waic   p_waic se_p_waic
## Model_2     0.000000 0.000000 -56.06766     5.614297 4.398758 1.5881553
## Model_1    -1.313396 2.609087 -57.38106     6.339455 3.358495 1.5109828
## Null_Model -2.504475 2.698814 -58.57214     5.506132 2.319574 0.8782678
##                waic  se_waic
## Model_2    112.1353 11.22859
## Model_1    114.7621 12.67891
## Null_Model 117.1443 11.01226
## 
## [[3]]

## 
## [[4]]

# Stacking weights over the all-response frequency models.
loo_model_weights(freq_total0, freq_total1, freq_total2)
## Method: stacking
## ------
##             weight
## freq_total0 0.000 
## freq_total1 0.273 
## freq_total2 0.727
# Stacking-weighted average posterior over the two non-null models.
freq_ave2 <- posterior_average(freq_total1, freq_total2, weights = "stacking")
# FIX: quantile()'s argument is `probs` (was `prob` via partial matching).
quantile(freq_ave2$b_scaleMI, probs = c(.025, .975))
##        2.5%       97.5% 
## -0.58073173  0.06741606
# Quick density check of the averaged coefficient.
# NOTE(review): `..density..` is deprecated in ggplot2 >= 3.4; consider
# `after_stat(density)` if the package is upgraded.
ggplot(freq_ave2, aes(x = b_scaleMI, y = ..density..)) +
  geom_density()

# (Typo "coefficent" in the axis label corrected.)
mcmc_areas(freq_ave2, pars = "b_scaleMI", prob = .95) +
  labs(x = "Regression coefficient (std)")

Range

# ----- Lexical range: default priors and candidate models -----
get_prior(scale(COCA_spoken_Range_Log_AW) ~ scale(MI) + scale(IndexCOCA_spoken_Frequency_Log_AW) , data = data, family = gaussian())
##                    prior     class                                   coef group
## 1                                b                                             
## 2                                b scaleIndexCOCA_spoken_Frequency_Log_AW      
## 3                                b                                scaleMI      
## 4 student_t(3, 0.2, 2.5) Intercept                                             
## 5   student_t(3, 0, 2.5)     sigma                                             
##   resp dpar nlpar bound
## 1                      
## 2                      
## 3                      
## 4                      
## 5
# Null model (intercept only) and model 1 (MI as sole predictor).
range0 <- brm(scale(COCA_spoken_Range_Log_AW) ~ 1 , data = data, 
                   family = gaussian(),
                    iter = iter1, warmup = 1000, chains = 4, seed =1234)
range1 <- brm(scale(COCA_spoken_Range_Log_AW) ~ scale(MI) , data = data, 
                   family = gaussian(),
                    prior = weak_prior,
                    iter = iter1, warmup = 1000, chains = 4, seed =1234)

print(summary(range1), digits = 5)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: scale(COCA_spoken_Range_Log_AW) ~ scale(MI) 
##    Data: data (Number of observations: 40) 
## Samples: 4 chains, each with iter = 5000; warmup = 1000; thin = 1;
##          total post-warmup samples = 16000
## 
## Population-Level Effects: 
##           Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## Intercept -0.00113   0.15651 -0.31315  0.30470 1.00007    12618    10709
## scaleMI   -0.30656   0.15719 -0.61919  0.00059 1.00020    14399    10856
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## sigma  0.99008   0.11776  0.79252  1.25177 1.00032    12852    11559
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
plot(range1)

bayes_R2(range1)
##     Estimate  Est.Error        Q2.5     Q97.5
## R2 0.1059115 0.07681136 0.001130617 0.2786825
conditional_effects(range1)

# Model 2: MI plus the learner-level frequency index.
# FIX: the original hard-coded `iter = 5000`; every other fit in this script
# uses `iter1` (which equals 5000 per the recorded output below), so the
# shared setting is used for consistency.
range2 <- brm(scale(COCA_spoken_Range_Log_AW) ~ scale(MI) + scale(IndexCOCA_spoken_Frequency_Log_AW) , data = data, 
                   family = gaussian(),
                    prior = weak_prior,
                    iter = iter1, warmup = 1000, chains = 4, seed =1234)
print(summary(range2), digits = 5)
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: scale(COCA_spoken_Range_Log_AW) ~ scale(MI) + scale(IndexCOCA_spoken_Frequency_Log_AW) 
##    Data: data (Number of observations: 40) 
## Samples: 4 chains, each with iter = 5000; warmup = 1000; thin = 1;
##          total post-warmup samples = 16000
## 
## Population-Level Effects: 
##                                        Estimate Est.Error l-95% CI u-95% CI
## Intercept                               0.00001   0.15525 -0.30441  0.30154
## scaleMI                                -0.24158   0.16041 -0.55621  0.07428
## scaleIndexCOCA_spoken_Frequency_Log_AW  0.21625   0.16248 -0.09985  0.53578
##                                           Rhat Bulk_ESS Tail_ESS
## Intercept                              0.99998    16713    12370
## scaleMI                                1.00009    14839    10816
## scaleIndexCOCA_spoken_Frequency_Log_AW 1.00028    15676    11707
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI    Rhat Bulk_ESS Tail_ESS
## sigma  0.97989   0.11847  0.78025  1.24317 1.00048    14547    11944
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
plot(range2)

bayes_R2(range2)
##     Estimate  Est.Error       Q2.5    Q97.5
## R2 0.1593404 0.08457742 0.01630894 0.331478
conditional_effects(range2)

# LOO/WAIC comparison for the range models. Note model 1 ranks first here,
# unlike the richness/frequency comparisons.
model_comp(range0, range1, range2)
## [[1]]
##             elpd_diff  se_diff  elpd_loo se_elpd_loo    p_loo  se_p_loo
## Model_1     0.0000000 0.000000 -57.77425    6.692283 3.639436 1.4085823
## Model_2    -0.4653911 2.069652 -58.23964    6.784503 5.158969 1.9704319
## Null_Model -0.8113045 2.161109 -58.58556    5.592533 2.355700 0.8383746
##               looic se_looic
## Model_1    115.5485 13.38457
## Model_2    116.4793 13.56901
## Null_Model 117.1711 11.18507
## 
## [[2]]
##             elpd_diff  se_diff elpd_waic se_elpd_waic   p_waic se_p_waic
## Model_1     0.0000000 0.000000 -57.67642     6.631913 3.541602 1.3405229
## Model_2    -0.4258103 2.042399 -58.10223     6.715009 5.021554 1.8962687
## Null_Model -0.8809419 2.122003 -58.55736     5.577541 2.327504 0.8216292
##                waic  se_waic
## Model_1    115.3528 13.26383
## Model_2    116.2045 13.43002
## Null_Model 117.1147 11.15508
## 
## [[3]]

## 
## [[4]]

### Bayesian stacking

# Stacking weights over the three range models.
loo_model_weights(range0, range1, range2, model_names = NULL)
## Method: stacking
## ------
##        weight
## range0 0.236 
## range1 0.388 
## range2 0.376

Averaged posterior

# Stacking-weighted average posterior over the two non-null range models.
range_ave <- posterior_average(range1, range2, weights = "stacking")
# FIX: quantile()'s argument is `probs` (was `prob` via partial matching).
quantile(range_ave$b_scaleMI, probs = c(.025, .975))
##        2.5%       97.5% 
## -0.59471863  0.03757488
# (Typo "coefficent" in the axis label corrected.)
mcmc_areas(range_ave, pars = "b_scaleMI", prob = .95) +
  labs(x = "Regression coefficient (std)")

Sensitivity analysis (Code adapted from Saito et al. [2020])

Defining functions for the sensitivity analysis (some parts have to be modified for other datasets)

# Sensitivity analysis: refit `formula` under student_t(3, 0, SD) priors on
# the regression coefficients for a grid of prior SDs, then once more under
# brms's default (flat) prior on the coefficients.
#
# @param formula model formula passed to brm(); fitted on the global `data`
#   with the global `iter1` iterations (as elsewhere in this script).
# @return a list of one-row data frames (lower / estimate / upper bound of
#   the 95% interval of the first slope), one per prior, flat prior last.
sa.func <- function(formula) {
  sds <- c(0.8, 0.9, 1:3, 10)
  sa <- vector("list", length(sds) + 1)

  # seq_along() is safer than seq(length(sds)) for a possibly-empty grid.
  for (i in seq_along(sds)) {
    # Current prior SD. It is passed to Stan as data named "iter" because the
    # prior string below refers to it under that name (do not rename there).
    prior_sd <- sds[i]
    student_t.prior <- prior(student_t(3, 0, iter), class = "b")
    stanvars <- stanvar(prior_sd, name = "iter")

    m <- brm(formula = formula,
             data = data,
             family = gaussian(),
             prior = student_t.prior,
             iter = iter1,
             stanvars = stanvars,
             warmup = 1000,
             chains = 4,
             seed = 1234)

    # Row 2 of mcmc_intervals_data() is the first slope (row 1 is the
    # intercept); columns 5/7/9 are the outer-interval lower bound, the
    # point estimate, and the outer-interval upper bound.
    sa[[i]] <- mcmc_intervals_data(m)[2, c(5, 7, 9)]
  }

  # Flat (default) prior on the coefficients. The original call passed the
  # last loop iteration's `stanvars`, which merely injected unused data into
  # the Stan model (and would error on an empty grid); it is dropped here.
  flat.m <- brm(formula = formula,
                data = data,
                family = gaussian(),
                iter = iter1,
                warmup = 1000,
                chains = 4,
                seed = 1234)

  sa[[length(sa)]] <- mcmc_intervals_data(flat.m)[2, c(5, 7, 9)]

  return(sa)
}

# Flatten the list produced by sa.func() into a tibble ready for plotting.
#
# @param sa list of one-row data frames (lower / estimate / upper), one per
#   prior, with the flat-prior result last.
# @param outcome label for the outcome variable, repeated on every row.
# @return tibble with columns lower / estimate / upper, outcome, an ordered
#   `prior` factor, and 0/1 sign factors for colouring the bound labels.
generate.as.df <- function(sa, outcome) {
  # Prior labels in fitting order; also used as the factor levels so the
  # plot keeps this ordering.
  prior_labels <- c(paste("SD =", c("0.8", "0.9", 1:3, 10)), "Flat")

  sa.df <- sa %>%
    lapply(data.frame) %>%
    bind_rows %>%
    mutate(
      outcome = rep(outcome, length(sa)),
      prior = factor(prior_labels, levels = prior_labels)
    ) %>%
    rename(estimate = 2, lower = 1, upper = 3) %>%
    mutate(
      lower.posneg = factor(ifelse(lower < 0, 0, 1)),
      upper.posneg = factor(ifelse(upper < 0, 0, 1))
    ) %>%
    as_tibble

  return(sa.df)
}

# Plot one sensitivity analysis: posterior mean and 95% interval of the
# focal coefficient for each prior, with the numeric bounds printed and
# coloured by sign (red = negative, blue = non-negative).
#
# @param as.df tibble from generate.as.df().
# @return a ggplot object (flipped coordinates: priors on the y axis).
sa.plot <- function(as.df) {
  sa.gg <- ggplot(as.df , aes(prior, estimate)) +
  geom_pointrange(aes(ymin = lower, ymax = upper)) +
  # Reference line at zero effect.
  geom_hline(yintercept = 0, linetype = 2, alpha = 0.7) +
  geom_text(aes(y = lower, label = lower %>% round(2) %>% format(nsmall = 2), color = lower.posneg), vjust = 1, size = 3) +
  geom_text(aes(y = upper, label = upper %>% round(2) %>% format(nsmall = 2), color = upper.posneg), vjust = 1, size = 3) +
  scale_y_continuous(breaks = c(-1, 0, 1)) +
  # Factor levels 0/1 from generate.as.df() map to red/blue respectively.
  scale_color_manual(values = c('red', 'blue')) +
  coord_flip(ylim = c(-1.2, 1.2)) +
  theme_bw() +
  theme(
    text = element_text(size = 13),
    legend.position = "none"
  ) +
  labs(title = paste("Sensitivity analysis on", as.df$outcome[1]), x = "Prior Distribution", y = "Posterior Mean and 95% Credible Interval")
  return(sa.gg)
}

Fluency ratings

# Sensitivity analysis for fluency ratings (predictor: first-response
# collocation frequency).
sa.fluency <- sa.func(formula = scale(Fluency) ~ scale(Raw_first))
as.fluency.df <- generate.as.df(sa.fluency, "Fluency rating")
sa.plot(as.fluency.df)

Articulation rate

# Sensitivity analysis for articulation rate.
sa.articulation.rate <- sa.func(formula = scale(ArticulationRate) ~ scale(Raw_first))
# BUG FIX: the original passed `sa.fluency` here, so the "Articulation Rate"
# data frame (and plot) actually showed the fluency-rating results.
as.articulationrate.df <- generate.as.df(sa.articulation.rate, "Articulation Rate")
sa.plot(as.articulationrate.df)

Silent pause ratio

# Sensitivity analysis for the silent pause ratio.
sa.silentpause <- sa.func(formula = scale(SilentPause) ~ scale(Raw_first))
as.silentpause.df <- generate.as.df(sa.silentpause, "Silent pause ratio")
sa.plot(as.silentpause.df)

# Sensitivity analysis for the filled pause ratio (predictor: log collocation
# frequency over all responses).
sa.filledpause <- sa.func(formula = scale(FilledPause) ~ scale(Rawfreq))
as.filledpause.df <- generate.as.df(sa.filledpause, "Filled pause ratio")
sa.plot(as.filledpause.df)

Lexical richness

# Sensitivity analysis for lexical richness (two-predictor model, matching
# Richness2 above).
sa.lexicalrichness <- sa.func(formula = scale(Richness) ~  scale(MI_first) + scale(IndexCOCA_spoken_Frequency_Log_AW))
as.lexicalrichness.df <- generate.as.df(sa.lexicalrichness, "Lexical richness")
sa.plot(as.lexicalrichness.df)

Lexical Frequency

# Sensitivity analysis for lexical frequency (matching freq_first2 above).
sa.frequency <- sa.func(formula = scale(COCA_spoken_Frequency_Log_AW) ~ scale(MI_first) + scale(IndexCOCA_spoken_Frequency_Log_AW) )
# BUG FIX: the original labelled this outcome "Articulation Rate"; this is
# the lexical-frequency model, so it is labelled accordingly.
as.frequency.df <- generate.as.df(sa.frequency, "Lexical Frequency")
sa.plot(as.frequency.df)

Range

# Sensitivity analysis for lexical range (matching range2 above).
sa.range <- sa.func(formula = scale(COCA_spoken_Range_Log_AW) ~ scale(MI) + scale(IndexCOCA_spoken_Frequency_Log_AW) )
as.range.df <- generate.as.df(sa.range, "Range")
sa.plot(as.range.df)

# The seven per-outcome panels below differed only in their data frame and
# title, so the shared ggplot recipe is factored into one helper.
#
# Build one sensitivity-analysis panel (posterior mean + 95% interval per
# prior), with blank axis titles so shared labels can be supplied by
# grid.arrange() when the panels are assembled.
#
# @param df tibble from generate.as.df().
# @param title panel title.
# @return a ggplot object.
sa.panel <- function(df, title) {
  ggplot(df, aes(prior, estimate)) +
    geom_pointrange(aes(ymin = lower, ymax = upper)) +
    geom_hline(yintercept = 0, linetype = 2, alpha = 0.7) +
    # Interval bounds printed at each end, coloured by sign (red = negative).
    geom_text(aes(y = lower, label = lower %>% round(2) %>% format(nsmall = 2), color = lower.posneg), vjust = 1, size = 3) +
    geom_text(aes(y = upper, label = upper %>% round(2) %>% format(nsmall = 2), color = upper.posneg), vjust = 1, size = 3) +
    scale_y_continuous(breaks = c(-1, 0, 1)) +
    scale_color_manual(values = c('red', 'blue')) +
    coord_flip(ylim = c(-1.2, 1.2)) +
    theme_bw() +
    theme(
      text = element_text(size = 13),
      legend.position = "none",
      axis.title = element_blank()
    ) +
    labs(title = title)
}

# One panel per outcome (titles unchanged from the original script).
sa.gg.fluency         <- sa.panel(as.fluency.df, 'Fluency rating')
sa.gg.art             <- sa.panel(as.articulationrate.df, 'Articulation rate')
sa.gg.silent          <- sa.panel(as.silentpause.df, 'Silent pause ratio')
sa.gg.filled          <- sa.panel(as.filledpause.df, 'Filled pause ratio')
sa.gg.lexicalrichness <- sa.panel(as.lexicalrichness.df, 'Richness rating')
sa.gg.frequency       <- sa.panel(as.frequency.df, 'Frequency')
sa.gg.range           <- sa.panel(as.range.df, 'Range')


# Assemble all seven sensitivity panels into one figure; the shared axis
# labels are supplied here because each panel's own axis titles are blank.
grid.arrange(sa.gg.fluency,sa.gg.art, sa.gg.silent, sa.gg.filled, sa.gg.lexicalrichness, sa.gg.frequency,sa.gg.range, ncol = 2, 
             top = "Sensitivity analysis",
             left = "Prior Distribution",
             bottom = "Posterior Mean and 95% Credible Interval")